blob: 7f9db47e8cc39c0c39feefb6d6b4c08e79216ff9 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
/* Driver version/date strings and the minimum MCP bootcode version
 * this driver is validated against. */
#define DRV_MODULE_VERSION	"1.52.1-6"
#define DRV_MODULE_RELDATE	"2010/02/16"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files: version string is assembled from the per-component version
 * macros so the requested firmware always matches the headers built in. */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Declare the firmware blobs so userspace tooling can pre-install them */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020088
/* Module parameters: load-time tuning/debug knobs (all perm 0 == not
 * visible/changeable via sysfs after load). */

static int multi_mode = 1;		/* multi-queue RSS on by default */
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;			/* 0 => derive from CPU count */
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;			/* non-zero disables HW LRO (TPA) */
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;			/* 0 auto, 1 force INTx, 2 force MSI */
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;			/* pause instead of dropping on full ring */
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;			/* debug: poll instead of interrupts */
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;			/* debug: -1 = leave PCIe MRRS alone */
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;			/* initial debug message level */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* workqueue used for deferred slowpath work */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
/* Supported chip variants; values are used as indices into board_info[]
 * and as driver_data in the PCI device table below. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

/* PCI IDs this driver binds to; third field is the board_type */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
/* Indirect GRC write: program the config-space window register with the
 * target GRC address, write the data word, then park the window back on
 * the vendor-ID offset so ordinary config accesses are unaffected. */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
166
/* Indirect GRC read: counterpart of bnx2x_reg_wr_ind() - select the GRC
 * address through the config-space window, read the data word, then
 * restore the window to the vendor-ID offset. */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* "GO" doorbell register for each of the 16 DMAE command channels */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* command slot idx in the chip's DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	/* copy the command dword-by-dword into the slot */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* ring the channel's GO doorbell to start execution */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
202
/* DMA @len32 dwords from host memory at @dma_addr to chip (GRC) address
 * @dst_addr using the DMAE engine.  Falls back to indirect register
 * writes if DMAE is not yet initialized.  Completion is detected by
 * polling the wb_comp word in the slowpath area, which the engine
 * writes with DMAE_COMP_VAL when done.  Serialized by bp->dmae_mutex. */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI (host) -> GRC transfer, completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for completion; give up (with an error) after cnt tries */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
276
/* DMA @len32 dwords from chip (GRC) address @src_addr into the slowpath
 * wb_data scratch area using the DMAE engine.  Mirror image of
 * bnx2x_write_dmae(): falls back to indirect reads when DMAE is not
 * ready, polls wb_comp for completion, serialized by bp->dmae_mutex.
 * Callers retrieve the result from bp->slowpath->wb_data[]. */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI (host) transfer, completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* clear destination scratch and the completion word */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for completion; give up (with an error) after cnt tries */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Read a 64-bit value from a wide-bus register pair via DMAE and
 * combine the two dwords (hi:lo) into a u64.  Compiled out unless
 * USE_WB_RD is defined. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
387
/* Scan the assert lists of all four STORM processors (X/T/C/U) in
 * internal memory and print every recorded assert (four dwords each).
 * A row whose first dword equals COMMON_ASM_INVALID_ASSERT_OPCODE marks
 * the end of a list.  Returns the total number of asserts found
 * (0 means the firmware recorded none). */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
/* Dump the MCP firmware's circular trace buffer from scratchpad memory
 * to the kernel log.  The current write mark is read from the scratch
 * area; the buffer is printed in two passes (mark..end, then
 * start..mark) so output appears in chronological order.  Each 8-dword
 * chunk is byte-swapped to ASCII and NUL-terminated via data[8]. */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	/* round the mark up to a dword boundary */
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	/* empty pr_err starts the line; chunks are appended with pr_cont */
	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* terminate so the chunk prints as a string */
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* terminate so the chunk prints as a string */
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
536
/* Emergency state dump invoked on a driver panic: disables statistics,
 * prints the slowpath/default indices, per-queue Rx and Tx indices,
 * then walks a window of each Rx BD/SGE/CQE ring and Tx ring around the
 * current consumer positions, and finally dumps the MCP fw trace and
 * any STORM asserts.  Output goes to the kernel log via BNX2X_ERR. */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop statistics DMA while we poke at the rings */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* window: 10 BDs before the consumer to ~503 after */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
/* Enable host-coalescing interrupts for this port's HC block, selecting
 * MSI-X, MSI, or INTx configuration bits according to bp->flags.  Also
 * programs the leading/trailing edge attention masks on E1H chips.
 * Ordering of register writes is enforced with mmiowb()/barrier(). */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first enable with MSI bit set as well, then clear
		 * the MSI bit with a second write below */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/*
 * Disable all HC interrupt sources (single-ISR, MSI/MSI-X, INTx line and
 * attention bits) for this port.  The write is read back to verify it
 * actually reached the chip.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/*
 * Quiesce interrupt handling: raise intr_sem so ISRs become no-ops,
 * optionally mask interrupts in HW (@disable_hw), then wait for every
 * in-flight ISR and the slowpath task to finish.  Must not be called
 * from interrupt context (synchronize_irq/flush_workqueue may sleep).
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* vector 1 belongs to CNIC; queue vectors follow it */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/*
 * Acknowledge status block @sb_id to the IGU: builds an igu_ack_register
 * word (storm id, consumer index, update flag, interrupt mode op) and
 * writes it to this port's HC command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
/*
 * Refresh the driver's cached CSTORM/USTORM status-block indices from the
 * fastpath status block, which the chip updates via DMA.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
798
/*
 * Read (and thereby acknowledge) the pending-interrupt SIMD mask from
 * this port's HC command register; the returned bitmask identifies which
 * status blocks have work.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
810
811
812/*
813 * fast path service functions
814 */
815
/*
 * During unload: true while TX packets are still outstanding
 * (producer has not caught up with consumer yet).
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
822
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200823/* free skb in the packet ring at pos idx
824 * return idx of last bd freed
825 */
826static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 u16 idx)
828{
829 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
Eilon Greensteinca003922009-08-12 22:53:28 -0700830 struct eth_tx_start_bd *tx_start_bd;
831 struct eth_tx_bd *tx_data_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200832 struct sk_buff *skb = tx_buf->skb;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200834 int nbd;
835
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
837 prefetch(&skb->end);
838
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
840 idx, tx_buf, skb);
841
842 /* unmap first bd */
843 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700844 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200847
Eilon Greensteinca003922009-08-12 22:53:28 -0700848 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200849#ifdef BNX2X_STOP_ON_ERROR
Eilon Greensteinca003922009-08-12 22:53:28 -0700850 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700851 BNX2X_ERR("BAD nbd!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200852 bnx2x_panic();
853 }
854#endif
Eilon Greensteinca003922009-08-12 22:53:28 -0700855 new_cons = nbd + tx_buf->first_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200856
Eilon Greensteinca003922009-08-12 22:53:28 -0700857 /* Get the next bd */
858 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
859
860 /* Skip a parse bd... */
861 --nbd;
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863
864 /* ...and the TSO split header bd since they have no mapping */
865 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
866 --nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200867 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200868 }
869
870 /* now free frags */
871 while (nbd > 0) {
872
873 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700874 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200877 if (--nbd)
878 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
879 }
880
881 /* release skb */
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700882 WARN_ON(!skb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000883 dev_kfree_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200884 tx_buf->first_bd = 0;
885 tx_buf->skb = NULL;
886
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700887 return new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200888}
889
/*
 * Number of TX BDs still available on this fastpath ring.  The
 * "next-page" entries (one per ring page) are counted as used so they
 * are never handed out; SUB_S16 makes the prod-cons difference
 * wrap-safe for 16-bit indices.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
/*
 * Reclaim completed TX packets on one fastpath ring: walk from the
 * software consumer up to the HW consumer from the status block,
 * freeing each packet's BDs, then wake the netdev TX queue if it was
 * stopped and enough room has been freed.
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the same conditions start_xmit() stops on */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
978
#ifdef BCM_CNIC
/* Forward declaration: CNIC CFC-delete completion handler (defined below). */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700982
/*
 * Handle a slowpath (ramrod) completion delivered on a fastpath RX CQ.
 * For non-leading queues (fp->index != 0) it advances the per-queue
 * open/halt state machine; for the leading queue it advances the global
 * bp->state machine (port setup/halt/CFC delete/set-MAC completions).
 * Also returns the consumed SPQ credit (bp->spq_left++).
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		/* per-queue state transitions for non-leading queues */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: global driver state transitions */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* pairs with the smp_rmb() in the set_mac waiter */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1065
/*
 * Free one RX SGE page buffer: unmap its DMA mapping, release the
 * page(s) and clear the software and hardware ring entries.
 * "Next page" ring elements (sw_buf->page == NULL) are skipped.
 */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
1085
/* Release all RX SGE page buffers in the index range [0, last). */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1094
/*
 * Allocate and DMA-map a fresh page buffer for RX SGE ring slot @index,
 * publishing its bus address in the hardware SGE entry.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure
 * (nothing is left half-initialized on failure).
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1121
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001122static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1124{
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128 dma_addr_t mapping;
1129
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1132 return -ENOMEM;
1133
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001135 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001137 dev_kfree_skb(skb);
1138 return -ENOMEM;
1139 }
1140
1141 rx_buf->skb = skb;
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147 return 0;
1148}
1149
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* hand the (CPU-touched) header area back to the device */
	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* move skb, mapping and HW descriptor from cons slot to prod slot */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1173
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
/*
 * Advance the RX SGE producer after a TPA completion: mark the SGEs the
 * firmware consumed (listed in @fp_cqe->sgl) as used in the sge_mask,
 * then walk the mask from the current producer and, for every fully
 * consumed 64-bit mask element, refill it and bump rx_sge_prod by one
 * element's worth of entries.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE entries used by the part of the packet that did
	   not fit into the BD (rounded up to whole SGE pages) */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element with bits still outstanding */
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1249
/*
 * Initialize the SGE ring tracking mask: all bits set ("available"),
 * except the per-page "next" entries which are cleared so they are
 * excluded from the producer arithmetic.
 */
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
1262
/*
 * Begin a TPA (LRO) aggregation on bin @queue: the empty skb held in
 * the TPA pool is mapped and placed at ring slot @prod, while the
 * partially-filled skb at @cons is parked in the pool (still mapped)
 * until bnx2x_tpa_stop() completes the aggregation.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1301
/*
 * Attach the SGE pages of a completed TPA aggregation to @skb as page
 * fragments.  Each consumed SGE is replaced with a freshly allocated
 * page; on allocation failure the function stops mid-packet and returns
 * the error, leaving the caller to drop the skb.
 * Returns 0 on success, negative errno on failure.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes that did not fit in the BD and live in SGE pages */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1367
/*
 * Complete a TPA aggregation on bin @queue: unmap the aggregated skb
 * held in the TPA pool, fix up its IP checksum, attach the SGE page
 * fragments and hand it to the stack (with VLAN acceleration when
 * applicable).  A replacement skb is allocated for the bin; if that
 * allocation fails the aggregated packet is dropped and the old buffer
 * stays in the bin.  The bin always returns to BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum: aggregation
			   rewrote length fields */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1457
/*
 * bnx2x_update_rx_prod - publish new Rx producer values to the firmware.
 *
 * Copies the BD, CQE and SGE producer indices into a local
 * ustorm_eth_rx_producers struct and writes it word-by-word into the
 * USTORM internal memory slot for this port/client, so the FW knows new
 * Rx buffers are available.  The wmb()/mmiowb() placement is ordering-
 * critical - do not move the register writes relative to the barriers.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Write the producers struct one 32-bit word at a time into the
	   per-port/per-client USTORM producers slot */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1492
/*
 * bnx2x_rx_int - Rx fast-path: process up to @budget completions on @fp.
 *
 * Walks the Rx completion queue (RCQ) from the software consumer up to the
 * hardware consumer taken from the status block.  For each CQE it either:
 *   - hands slow-path CQEs to bnx2x_sp_event(),
 *   - starts/stops TPA (LRO) aggregations when TPA is enabled,
 *   - or delivers a regular packet: small packets (when mtu > 1500) are
 *     copied into a fresh skb, larger ones are unmapped and passed up
 *     directly after a replacement skb is allocated.
 * On any allocation failure the original buffer is recycled back onto the
 * ring (reuse_rx) and the packet is dropped.  Finally the new producer
 * values are published to the FW via bnx2x_update_rx_prod().
 *
 * Returns the number of packets processed (<= budget).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Take local snapshots of the ring indices; they are written back
	   only once, after the loop */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					/* Park this skb in the TPA pool; it
					   will be completed on TPA_END */
					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Make the part the error check below looks at
			   visible to the CPU before deciding the skb's fate */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* Original buffer stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* Replacement skb is on the ring; hand the
				   original one up after unmapping it */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				/* Recycle the buffer back onto the BD ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Only claim a good checksum when Rx csum offload is
			   enabled and the HW validated it */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		/* Strip/report the VLAN tag via the accel path when a vlan
		   group is registered and HW tagged the packet */
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Write the advanced indices back to the fastpath struct */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1726
/*
 * bnx2x_msix_fp_int - MSI-X handler for one fastpath queue.
 *
 * Acks the queue's status block with interrupts disabled and schedules
 * NAPI, which does the actual Rx/Tx work.  Always returns IRQ_HANDLED
 * since MSI-X vectors are not shared.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	/* Ack the SB and keep its interrupt disabled until NAPI is done */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1756
/*
 * bnx2x_interrupt - INTx/MSI (non-MSI-X) interrupt handler.
 *
 * Reads and acks the aggregated interrupt status, then dispatches:
 * per-queue bits schedule NAPI on the matching fastpath, the CNIC bit
 * (when built in) calls into the registered cnic handler under RCU, and
 * bit 0 queues the slow-path task.  Each handled source is cleared from
 * @status; anything left over is logged as unknown.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Each SB owns one status bit, starting at bit 1 */
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops may be unregistered concurrently; RCU protects
		   the dereference */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0 is the slow-path (default SB) indication */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1828
1829/* end of fast path */
1830
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001832
1833/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834
1835/*
1836 * General service functions
1837 */
1838
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001840{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001845 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001846
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
1854
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
Eliezer Tamirf1410642008-02-28 11:51:50 -08001862 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
1869
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001872 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001875 if (lock_status & resource_bit)
1876 return 0;
1877
1878 msleep(5);
1879 }
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
1883
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001890
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
Eliezer Tamirf1410642008-02-28 11:51:50 -08001906 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001907 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
1912 }
1913
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001914 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001915 return 0;
1916}
1917
/* HW Lock for shared dual port PHYs */
/* Take the port PHY mutex, then (only on boards that require it) the
   MDIO HW lock.  Acquisition order must mirror bnx2x_release_phy_lock(). */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1926
/* Release the MDIO HW lock (when held) and then the port PHY mutex -
   exact reverse of bnx2x_acquire_phy_lock(). */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1934
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001965int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1974
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1978 }
1979
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
1992
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
2000
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
2007
2008 default:
2009 break;
2010 }
2011
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002014
2015 return 0;
2016}
2017
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002018int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019{
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2027
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2031 }
2032
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2045
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2053
2054 default:
2055 break;
2056 }
2057
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061 return 0;
2062}
2063
Eliezer Tamirf1410642008-02-28 11:51:50 -08002064static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065{
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
2068
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
2073 }
2074
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
2086
Eilon Greenstein6378c022008-08-13 15:59:25 -07002087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
2093
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
2099
2100 default:
2101 break;
2102 }
2103
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002106
2107 return 0;
2108}
2109
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002111{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002116 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002117 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002118
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002121 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002122 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002123
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002126 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002127
Eliezer Tamirf1410642008-02-28 11:51:50 -08002128 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002130 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002131 break;
2132 }
2133}
2134
/*
 * bnx2x_link_report - log link status and toggle the carrier flag.
 *
 * When the MF function is disabled the link is always reported down.
 * On link-up the message is assembled incrementally with pr_cont(), so
 * the call order below determines the printed line - do not reorder.
 * In E1H multi-function mode the reported speed is capped by the
 * function's configured max bandwidth.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* mf_config carries the max BW in 100 Mbps units */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Append the flow-control summary, e.g.
		   ", receive & transmit flow control ON" */
		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
2185
/* First-time link bring-up after device load.
 * Configures flow-control autoneg advertisement, runs the PHY init
 * under the PHY lock and reports the result.
 * Returns the bnx2x_phy_init() result, or -EINVAL when no bootcode
 * (MCP) is present.
 * NOTE(review): the return type is u8, so -EINVAL is truncated to a
 * positive value — confirm callers only test for non-zero.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* Self-test (ethtool diag) runs the link in loopback */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* On emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2220
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002221static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002222{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002223 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002224 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002226 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002227
Eilon Greenstein19680c42008-08-13 15:47:33 -07002228 bnx2x_calc_fc_adv(bp);
2229 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002230 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002231}
2232
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002235 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002236 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002238 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002239 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002241}
2242
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
2246
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002247 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002249 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002250
2251 return rc;
2252}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253
/* Reset and recompute the per-port rate-shaping and fairness timer
 * parameters in bp->cmng, scaled to the current line speed.
 * Must only run with a non-zero bp->link_vars.line_speed (it is used
 * as a divisor below).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;  /* port rate, bytes/usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2288
/* Calculates the sum of vn_min_rates (stored in bp->vn_weight_sum).
   It's needed for further normalizing of the min_rates.
   Result:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm is deactivated (the
   CMNG fairness flag is cleared below).
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		/* min bandwidth is configured in units of 100 Mbps */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2334
/* Program the per-vnic rate-shaping and fairness contexts for @func
 * into the XSTORM internal memory, based on the min/max bandwidth
 * configured for that function in the shared memory.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* bandwidth fields are in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* vn_weight_sum == 0 means fairness is disabled; leave the
	   fairness context zeroed in that case */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2396
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002397
/* This function is called upon link interrupt: it re-reads the link
 * state, updates dropless flow control and statistics, reports the new
 * state, and — in multi-function mode — notifies the sibling functions
 * and reprograms the rate shaping/fairness contexts.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether pause frames
			   may be generated for this port */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2469
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002470static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002471{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002473 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002474
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
Eilon Greenstein2691d512009-08-12 08:22:08 +00002482 bnx2x_calc_vn_weight_sum(bp);
2483
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002484 /* indicate link status */
2485 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002486}
2487
/* This function takes on the PMF role (bp->port.pmf): it enables the
 * NIG attention for this vnic in the HC edge registers and notifies
 * the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2503
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002504/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
/* send the MCP a request, block until there is a reply.
 * Serializes all mailbox traffic through bp->fw_mb_mutex; each request
 * carries an incrementing sequence number which the firmware echoes
 * back in its reply.
 * Returns the FW_MSG_CODE_* reply, or 0 if the firmware did not
 * respond within the timeout.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
/* Quiesce an E1H function that the MCP disabled: stop the Tx queues,
 * clear this function's enable bit in the NIG LLH, and drop carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2565
/* Re-enable an E1H function after the MCP re-enabled it: set this
 * function's enable bit in the NIG LLH and restart the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2580
/* Recompute the port and per-vnic rate shaping/fairness contexts
 * (e.g. after a bandwidth-allocation DCC event).  If we are the PMF,
 * also raise the link-sync attention towards the sibling functions and
 * write the new port context to XSTORM internal memory.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2614
/* Handle a Driver Control Channel (DCC) event from the MCP:
 * enable/disable of this PF and/or a bandwidth re-allocation.
 * Acknowledges the event back to the MCP with OK or FAILURE
 * (FAILURE when some event bits were left unhandled).
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2651
Michael Chan28912902009-10-10 13:46:53 +00002652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
/* must be called under the spq lock.
 * Publishes the new slow-path queue producer index to the XSTORM so
 * the firmware picks up the newly queued entries.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* keep the producer MMIO write ordered before any later unlock */
	mmiowb();
}
2680
/* the slow path queue is odd since completions arrive on the fastpath ring.
 * Posts a single slow-path queue entry (ramrod) under bp->spq_lock.
 * Returns 0 on success, -EBUSY if the SPQ ring is full (and panics),
 * or -EIO when the driver is already in the panic state.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2727
2728/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002729static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002732 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002745 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
/* Sample the default status block indices written by the chip and
 * update the driver's cached copies.
 * Returns a bitmask of which sections changed:
 *   1  - attention bits index
 *   2  - CSTORM index
 *   4  - USTORM index
 *   8  - XSTORM index
 *   16 - TSTORM index
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2789
2790/*
2791 * slow path service functions
2792 */
2793
/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired sources (NIG/link,
 * GPIOs, general attentions) and acknowledge them to the HC.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot be asserted while we already consider it asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU (under the HW lock
	   since the mask register is shared with the MCP) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* acknowledge the attention bits towards the HC */
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2889
/* Record a fan failure in the shared-memory PHY configuration (so the
 * failure persists across driver reloads) and log it to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
	       "Please contact Dell Support for assistance.\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002904
/* Handle newly deasserted group-0 attention bits for this function.
 *
 * Covers: SPIO5 (fan failure — PHY is powered down via GPIOs and the
 * failure is recorded), the GPIO3 module-detect interrupt, and fatal
 * HW-block attentions from assert-set 0 (which panic the driver).
 *
 * @attn: group-0 attention bits to service (caller has already ANDed
 *        them with the attention group mask).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register so it cannot
		 * re-assert while we handle the fan failure */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		/* record the failure in SHMEM and warn the user */
		bnx2x_fan_failure(bp);
	}

	/* SFP+ module detect interrupt (either function's GPIO3) */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits, report them and stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2968
/* Handle newly deasserted group-1 attention bits.
 *
 * Services doorbell-queue (DORQ) attentions and fatal HW-block
 * attentions from assert-set 1 (the latter panic the driver).
 *
 * @attn: group-1 attention bits to service.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits so they cannot re-assert, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2999
/* Handle newly deasserted group-2 attention bits.
 *
 * Services CFC and PXP HW attentions and fatal HW-block attentions
 * from assert-set 2 (the latter panic the driver).
 *
 * @attn: group-2 attention bits to service.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits so they cannot re-assert, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3039
/* Handle newly deasserted group-3 attention bits.
 *
 * Group 3 carries the firmware/management attentions: the PMF link
 * attention (refreshes mf_config, dispatches DCC events, updates link
 * status and possibly takes over PMF duty), MC/MCP asserts (fatal),
 * and the latched GRC attentions.
 *
 * @attn: group-3 attention bits to service.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear this function's general attention bit */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* re-read multi-function config and driver status
			 * from shared memory */
			bp->mf_config = SHMEM_RD(bp,
					 mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* become PMF if the MCP just promoted us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* GRC detail registers exist on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3094
/* Service all attention groups whose bits just deasserted.
 *
 * Reads the after-invert AEU signal registers under the ALR lock,
 * dispatches each active group to the per-set handlers
 * (deasserted3/1/2/0, in that order), flags HW parity attentions,
 * acks the deasserted bits in the HC, and finally re-enables them in
 * the AEU mask (under the per-port attention-mask HW lock) while
 * updating the driver's attn_state.
 *
 * @deasserted: bitmap of attention groups that deasserted.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			/* hand each signal set (masked to this group)
			 * to its handler */
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* ack the deasserted bits in the host coalescing block */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* every deasserted bit should have been tracked in attn_state */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the serviced attention lines in the AEU mask */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3173
3174static void bnx2x_attn_int(struct bnx2x *bp)
3175{
3176 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003177 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3178 attn_bits);
3179 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003181 u32 attn_state = bp->attn_state;
3182
3183 /* look for changed bits */
3184 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3185 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3186
3187 DP(NETIF_MSG_HW,
3188 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3189 attn_bits, attn_ack, asserted, deasserted);
3190
3191 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003192 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003193
3194 /* handle bits that were raised */
3195 if (asserted)
3196 bnx2x_attn_int_asserted(bp, asserted);
3197
3198 if (deasserted)
3199 bnx2x_attn_int_deasserted(bp, deasserted);
3200}
3201
/* Slow-path work handler (queued on bnx2x_wq from the slow-path ISR).
 *
 * Refreshes the default-status-block indices, services HW attentions
 * if their index advanced, then acks all storm indices — only the
 * final ack (TSTORM) uses IGU_INT_ENABLE, which re-enables the
 * slow-path interrupt after all updates are consumed.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status: bitmap of status-block indices that advanced */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	/* last ack re-enables the slow-path interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3236
/* MSI-X slow-path interrupt handler.
 *
 * Disables further slow-path IGU interrupts, forwards the event to
 * the CNIC driver if one is registered (under RCU), and defers the
 * real work to bnx2x_sp_task() via the driver workqueue.
 *
 * @irq:          interrupt number (unused here).
 * @dev_instance: the net_device this vector was registered with.
 * Returns IRQ_HANDLED in all cases.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the slow-path interrupt until sp_task re-enables it */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops may be swapped/cleared concurrently; RCU
		 * keeps the handler valid for the duration of the call */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3270
3271/* end of slow path */
3272
3273/* Statistics */
3274
3275/****************************************************************************
3276* Macros
3277****************************************************************************/
3278
/* 64-bit statistics helpers. Counters are kept as hi/lo u32 pairs;
 * these macros implement add/subtract with explicit carry/borrow.
 * The UPDATE_* macros rely on variables in the caller's scope
 * (new, old, pstats, estats, qstats, diff, *client). */

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* fold the delta of HW counter 's' since last read into both
 * mac_stx[0] (latest snapshot) and mac_stx[1] (accumulated) as 't' */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* accumulate the delta of NIG counter 's' into estats field 't' */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* extend 32-bit MAC counter 's' into the 64-bit accumulated stats */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* accumulate the LE32 tstorm-client counter delta into qstats 't' */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* accumulate the LE32 ustorm-client counter delta into qstats 't' */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* accumulate the LE32 xstorm-client counter delta into qstats 't' */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* subtract the LE32 ustorm-client counter delta from qstats 't' */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3384
3385/*
3386 * General service functions
3387 */
3388
3389static inline long bnx2x_hilo(u32 *hiref)
3390{
3391 u32 lo = *(hiref + 1);
3392#if (BITS_PER_LONG == 64)
3393 u32 hi = *hiref;
3394
3395 return HILO_U64(hi, lo);
3396#else
3397 return lo;
3398#endif
3399}
3400
3401/*
3402 * Init service functions
3403 */
3404
/* Post a statistics-query ramrod on the slow-path queue.
 *
 * No-op if a query is already pending. Builds the ramrod data with a
 * rolling driver counter, the PMF port-collection flag, and a bit
 * vector of all queue client ids, then posts it; on success marks
 * stats_pending so no second query is issued until this one completes.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq,
			 * so give back the one bnx2x_sp_post consumed */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003426
/* Kick off the HW statistics DMAE transfers prepared in the slowpath
 * buffer.
 *
 * Arms the completion word, then either posts a "loader" DMAE command
 * that chains the previously built commands (when executer_idx > 0),
 * or posts the single function-stats command directly. Skipped
 * entirely on slow (emulation/FPGA) chip revisions.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* arm the completion marker before posting anything */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the first prepared command into DMAE command memory
		 * at the slot after the loader */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3474
3475static int bnx2x_stats_comp(struct bnx2x *bp)
3476{
3477 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3478 int cnt = 10;
3479
3480 might_sleep();
3481 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003482 if (!cnt) {
3483 BNX2X_ERR("timeout waiting for stats finished\n");
3484 break;
3485 }
3486 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003487 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003488 }
3489 return 1;
3490}
3491
3492/*
3493 * Statistics service functions
3494 */
3495
/* Read the accumulated port statistics from the management block into
 * this (newly promoted PMF) function's buffer.
 *
 * Builds two chained DMAE GRC->PCI reads — the port-stats area is
 * larger than one DMAE transfer (DMAE_LEN32_RD_MAX dwords) — posts
 * them and waits synchronously for completion. Only valid when this
 * function is the PMF in a multi-function setup with a port-stats
 * area assigned.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completes to GRC (chains
	 * to the next command) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completes to the PCI completion
	 * word polled by bnx2x_stats_comp() */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3550
3551static void bnx2x_port_stats_init(struct bnx2x *bp)
3552{
3553 struct dmae_command *dmae;
3554 int port = BP_PORT(bp);
3555 int vn = BP_E1HVN(bp);
3556 u32 opcode;
3557 int loader_idx = PMF_DMAE_C(bp);
3558 u32 mac_addr;
3559 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3560
3561 /* sanity */
3562 if (!bp->link_vars.link_up || !bp->port.pmf) {
3563 BNX2X_ERR("BUG!\n");
3564 return;
3565 }
3566
3567 bp->executer_idx = 0;
3568
3569 /* MCP */
3570 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3571 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3572 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3573#ifdef __BIG_ENDIAN
3574 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3575#else
3576 DMAE_CMD_ENDIANITY_DW_SWAP |
3577#endif
3578 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3579 (vn << DMAE_CMD_E1HVN_SHIFT));
3580
3581 if (bp->port.port_stx) {
3582
3583 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3584 dmae->opcode = opcode;
3585 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3586 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3587 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3588 dmae->dst_addr_hi = 0;
3589 dmae->len = sizeof(struct host_port_stats) >> 2;
3590 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591 dmae->comp_addr_hi = 0;
3592 dmae->comp_val = 1;
3593 }
3594
3595 if (bp->func_stx) {
3596
3597 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3598 dmae->opcode = opcode;
3599 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3600 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3601 dmae->dst_addr_lo = bp->func_stx >> 2;
3602 dmae->dst_addr_hi = 0;
3603 dmae->len = sizeof(struct host_func_stats) >> 2;
3604 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3605 dmae->comp_addr_hi = 0;
3606 dmae->comp_val = 1;
3607 }
3608
3609 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003610 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3611 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3612 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3613#ifdef __BIG_ENDIAN
3614 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3615#else
3616 DMAE_CMD_ENDIANITY_DW_SWAP |
3617#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003618 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3619 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003620
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003621 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003622
3623 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3624 NIG_REG_INGRESS_BMAC0_MEM);
3625
3626 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3627 BIGMAC_REGISTER_TX_STAT_GTBYT */
3628 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3629 dmae->opcode = opcode;
3630 dmae->src_addr_lo = (mac_addr +
3631 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3632 dmae->src_addr_hi = 0;
3633 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3634 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3635 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3636 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3637 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3638 dmae->comp_addr_hi = 0;
3639 dmae->comp_val = 1;
3640
3641 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3642 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3643 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3644 dmae->opcode = opcode;
3645 dmae->src_addr_lo = (mac_addr +
3646 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3647 dmae->src_addr_hi = 0;
3648 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003649 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003650 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003652 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3653 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3654 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655 dmae->comp_addr_hi = 0;
3656 dmae->comp_val = 1;
3657
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003658 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003659
3660 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3661
3662 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3663 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3664 dmae->opcode = opcode;
3665 dmae->src_addr_lo = (mac_addr +
3666 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3667 dmae->src_addr_hi = 0;
3668 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3669 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3670 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3671 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3672 dmae->comp_addr_hi = 0;
3673 dmae->comp_val = 1;
3674
3675 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3676 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677 dmae->opcode = opcode;
3678 dmae->src_addr_lo = (mac_addr +
3679 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3680 dmae->src_addr_hi = 0;
3681 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003682 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003683 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003685 dmae->len = 1;
3686 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3687 dmae->comp_addr_hi = 0;
3688 dmae->comp_val = 1;
3689
3690 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3691 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3692 dmae->opcode = opcode;
3693 dmae->src_addr_lo = (mac_addr +
3694 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3695 dmae->src_addr_hi = 0;
3696 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003697 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003698 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003700 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3701 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3702 dmae->comp_addr_hi = 0;
3703 dmae->comp_val = 1;
3704 }
3705
3706 /* NIG */
3707 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003708 dmae->opcode = opcode;
3709 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3710 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3711 dmae->src_addr_hi = 0;
3712 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3713 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3714 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3715 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3716 dmae->comp_addr_hi = 0;
3717 dmae->comp_val = 1;
3718
3719 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3720 dmae->opcode = opcode;
3721 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3722 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3723 dmae->src_addr_hi = 0;
3724 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3725 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3726 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->len = (2*sizeof(u32)) >> 2;
3729 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3730 dmae->comp_addr_hi = 0;
3731 dmae->comp_val = 1;
3732
3733 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003734 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3735 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3736 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3737#ifdef __BIG_ENDIAN
3738 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3739#else
3740 DMAE_CMD_ENDIANITY_DW_SWAP |
3741#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003742 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3743 (vn << DMAE_CMD_E1HVN_SHIFT));
3744 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3745 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003746 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003747 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3748 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3749 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->len = (2*sizeof(u32)) >> 2;
3752 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3753 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3754 dmae->comp_val = DMAE_COMP_VAL;
3755
3756 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003757}
3758
/* bnx2x_func_stats_init - prepare the DMAE command that copies this
 * function's statistics block (host_func_stats) from host memory to the
 * per-function statistics address in device memory (bp->func_stx).
 *
 * Only sets up bp->stats_dmae and arms the completion word; the command
 * is executed later (see bnx2x_stats_start() -> bnx2x_hw_stats_post()).
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		/* no device-side function stats address - nothing to set up */
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI (host) -> GRC (device) copy; on completion the DMAE writes
	 * DMAE_COMP_VAL into the stats_comp word in host memory */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	/* device addresses and lengths are in 32-bit words, hence >> 2 */
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* clear the completion word so the post/poll cycle can detect it */
	*stats_comp = 0;
}
3794
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003795static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003796{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003797 if (bp->port.pmf)
3798 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003799
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003800 else if (bp->func_stx)
3801 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003802
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003803 bnx2x_hw_stats_post(bp);
3804 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003805}
3806
/* bnx2x_stats_pmf_start - statistics ramp-up when this function takes
 * over as the Port Management Function (PMF).
 *
 * Sequence: finish/acknowledge any outstanding stats DMAE
 * (bnx2x_stats_comp), refresh the PMF view of the counters
 * (bnx2x_stats_pmf_update), then restart the normal collection flow.
 * NOTE(review): helper semantics inferred from their names and use
 * here -- confirm against their definitions elsewhere in this file.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3813
/* bnx2x_stats_restart - finish any in-flight statistics work
 * (bnx2x_stats_comp) and start a fresh collection cycle. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003819
/* bnx2x_bmac_stats_update - fold the freshly DMAE'd BMAC hardware
 * counters into the host_port_stats block and the pause-frame fields of
 * bp->eth_stats.
 *
 * NOTE(review): UPDATE_STAT64() expands in terms of the locals declared
 * below ('new', 'pstats' and the anonymous 'diff' struct, which look
 * unused here) -- do not rename or remove them.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* rx side */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	/* tx side */
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* mirror the pause-frame counters from mac_stx[1] into eth_stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3870
/* bnx2x_emac_stats_update - fold the freshly DMAE'd EMAC hardware
 * counters into the host_port_stats block and the pause-frame fields of
 * bp->eth_stats.  EMAC pause totals are the sum of xon + xoff events.
 *
 * NOTE(review): UPDATE_EXTEND_STAT() expands in terms of the locals
 * 'new' and 'pstats' declared below -- do not rename or remove them.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon received + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3927
/* bnx2x_hw_stats_update - process the MAC and NIG statistics fetched by
 * the last DMAE pass.
 *
 * Dispatches to the BMAC/EMAC-specific update, extends the 32-bit NIG
 * BRB counters into 64-bit values via deltas, snapshots the raw NIG
 * block for the next delta, and mirrors the port stats into
 * bp->eth_stats.
 *
 * Returns 0 on success, -1 if stats arrived while no MAC is active
 * (should not happen).
 *
 * NOTE(review): ADD_EXTEND_64()/UPDATE_STAT64_NIG() expand in terms of
 * the locals 'new', 'old', 'estats' and the anonymous 'diff' struct
 * below -- do not rename or remove them.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* BRB discard/truncate are 32-bit in the NIG; extend to 64 bits
	 * by accumulating the delta against the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* keep the raw NIG block as the baseline for the next delta */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the accumulated MAC statistics into eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* bump the sequence markers; NOTE(review): presumably readers
	 * treat matching start/end as a consistent snapshot -- confirm */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* log whenever the shmem NIG timer max value changes */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3977
/* bnx2x_storm_stats_update - fold the per-client statistics reported by
 * the x/t/u storm firmware into per-queue (qstats), per-function
 * (fstats) and device-global (estats) driver statistics.
 *
 * Returns 0 on success, or a negative value (-1/-2/-4 for the x/t/u
 * storm respectively) if that storm's counter does not yet match the
 * expected bp->stats_counter sequence number, i.e. the firmware has not
 * finished publishing this cycle; the caller retries (see
 * bnx2x_stats_update()).
 *
 * NOTE(review): UPDATE_EXTEND_TSTAT/USTAT/XSTAT and SUB_EXTEND_USTAT
 * expand in terms of the loop locals ('tclient', 'uclient', 'xclient',
 * their 'old_*' partners, 'qstats' and 'diff') -- do not rename or
 * remove them, even where they look unused.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* start from the saved function-stats baseline; the 2*sizeof(u32)
	 * excluded are presumably the start/end sequence markers --
	 * NOTE(review): confirm against struct host_func_stats layout */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these globals are re-accumulated from the per-queue values */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? each storm stamps its block with
		 * a sequence counter that must be one behind the driver's */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes = good bytes; then error bytes are added so
		 * that "total" includes both */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers were counted as
		 * received by the tstorm; take them back out and count
		 * them as no_buff discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remember raw tstorm discard counters for the next pass */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the device-global error/discard counters */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC also count as received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish function totals into eth_stats; NOTE(review): relies on
	 * bnx2x_eth_stats starting with the same field layout as
	 * host_func_stats (minus the two marker words) -- verify */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide tstorm discard counters are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* bump the sequence markers of the function stats block */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	/* storm stats consumed; reset the retry counter used by
	 * bnx2x_stats_update() */
	bp->stats_pending = 0;

	return 0;
}
4192
4193static void bnx2x_net_stats_update(struct bnx2x *bp)
4194{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004195 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004196 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004197 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004198
4199 nstats->rx_packets =
4200 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4201 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4202 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4203
4204 nstats->tx_packets =
4205 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4206 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4207 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4208
Eilon Greensteinde832a52009-02-12 08:36:33 +00004209 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004210
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004211 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004212
Eilon Greensteinde832a52009-02-12 08:36:33 +00004213 nstats->rx_dropped = estats->mac_discard;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004214 for_each_queue(bp, i)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004215 nstats->rx_dropped +=
4216 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4217
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004218 nstats->tx_dropped = 0;
4219
4220 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004221 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004222
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004223 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004224 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004225
4226 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004227 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4228 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4229 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4230 bnx2x_hilo(&estats->brb_truncate_hi);
4231 nstats->rx_crc_errors =
4232 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4233 nstats->rx_frame_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4235 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004236 nstats->rx_missed_errors = estats->xxoverflow_discard;
4237
4238 nstats->rx_errors = nstats->rx_length_errors +
4239 nstats->rx_over_errors +
4240 nstats->rx_crc_errors +
4241 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004242 nstats->rx_fifo_errors +
4243 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004244
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004245 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004246 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4247 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4248 nstats->tx_carrier_errors =
4249 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004250 nstats->tx_fifo_errors = 0;
4251 nstats->tx_heartbeat_errors = 0;
4252 nstats->tx_window_errors = 0;
4253
4254 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00004255 nstats->tx_carrier_errors +
4256 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4257}
4258
4259static void bnx2x_drv_stats_update(struct bnx2x *bp)
4260{
4261 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4262 int i;
4263
4264 estats->driver_xoff = 0;
4265 estats->rx_err_discard_pkt = 0;
4266 estats->rx_skb_alloc_failed = 0;
4267 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004268 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004269 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4270
4271 estats->driver_xoff += qstats->driver_xoff;
4272 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4273 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4274 estats->hw_csum_err += qstats->hw_csum_err;
4275 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004276}
4277
/* Consume a completed statistics DMAE cycle: fold the freshly DMAed HW and
 * storm counters into the driver/netdev statistics and start the next
 * collection round.  Runs as the UPDATE action of the ENABLED state in
 * bnx2x_stats_stm.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE cycle has not completed yet - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the PMF collects the HW (port-level) statistics */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats missing for several consecutive ticks is fatal */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose debug dump: queue 0 ring state plus per-queue counters */
	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* trigger the next HW/storm statistics collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004345
/* Assemble the final DMAE commands that flush the latest port statistics
 * (and, when a function stats mailbox exists, function statistics) from
 * host memory to the management FW areas before statistics are disabled.
 * The commands are only built here; they are fired by the subsequent
 * bnx2x_hw_stats_post() call in bnx2x_stats_stop().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: host memory (PCI) -> chip (GRC) copy */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, chain to it via the
		 * DMAE loader; otherwise complete directly to stats_comp */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* last command in the chain - completes to stats_comp */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4409
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004410static void bnx2x_stats_stop(struct bnx2x *bp)
4411{
4412 int update = 0;
4413
4414 bnx2x_stats_comp(bp);
4415
4416 if (bp->port.pmf)
4417 update = (bnx2x_hw_stats_update(bp) == 0);
4418
4419 update |= (bnx2x_storm_stats_update(bp) == 0);
4420
4421 if (update) {
4422 bnx2x_net_stats_update(bp);
4423
4424 if (bp->port.pmf)
4425 bnx2x_port_stats_stop(bp);
4426
4427 bnx2x_hw_stats_post(bp);
4428 bnx2x_stats_comp(bp);
4429 }
4430}
4431
/* No-op action for state/event pairs in bnx2x_stats_stm that need no work. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4435
/* Statistics state machine transition table, indexed as
 * [current state][event]; each entry names the action to execute and the
 * state to move to.  Driven exclusively by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4454
/* Single entry point for statistics events: run the action registered for
 * the (current state, event) pair in bnx2x_stats_stm and advance the state.
 */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	/* UPDATE events fire on every timer tick - only log them when the
	 * timer debug message level is enabled */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
4469
/* Write the initial (zeroed) host port statistics buffer out to the
 * management FW port-stats area via a single synchronous DMAE transfer.
 * PMF only; both pmf and port_stx are verified by the sanity check.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* host memory (PCI) -> management FW area (GRC), completing to the
	 * stats_comp word so bnx2x_stats_comp() can wait on it */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4507
/* Initialize the function statistics area for every function (vnic) that
 * shares this port, by temporarily redirecting bp->func_stx to each
 * function's mailbox address in shared memory and running the normal
 * init/post/complete sequence.  PMF only (see sanity check).
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* function numbers interleave across the two ports */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4536
/* Read back the current function statistics from the management FW area
 * into the host func_stats_base buffer with a single synchronous DMAE
 * transfer (direction is GRC -> PCI, the reverse of the init path).
 * Used by non-PMF functions that still have a stats mailbox.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4574
/* One-time statistics setup: discover the management FW stats mailboxes,
 * snapshot the NIG baseline counters, zero all per-queue and device-wide
 * counters, and seed the FW stats areas according to the PMF role.
 * Leaves the state machine in STATS_STATE_DISABLED.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management FW - no stats mailboxes */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: capture the current NIG counters as the baseline
	 * against which future deltas are computed */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF seeds the FW areas with zeroed snapshots */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF picks up whatever the FW area currently holds */
		bnx2x_func_stats_base_update(bp);
}
4636
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004637static void bnx2x_timer(unsigned long data)
4638{
4639 struct bnx2x *bp = (struct bnx2x *) data;
4640
4641 if (!netif_running(bp->dev))
4642 return;
4643
4644 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004645 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004646
4647 if (poll) {
4648 struct bnx2x_fastpath *fp = &bp->fp[0];
4649 int rc;
4650
Eilon Greenstein7961f792009-03-02 07:59:31 +00004651 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004652 rc = bnx2x_rx_int(fp, 1000);
4653 }
4654
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004655 if (!BP_NOMCP(bp)) {
4656 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004657 u32 drv_pulse;
4658 u32 mcp_pulse;
4659
4660 ++bp->fw_drv_pulse_wr_seq;
4661 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4662 /* TBD - add SYSTEM_TIME */
4663 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004664 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004665
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004666 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004667 MCP_PULSE_SEQ_MASK);
4668 /* The delta between driver pulse and mcp response
4669 * should be 1 (before mcp response) or 0 (after mcp response)
4670 */
4671 if ((drv_pulse != mcp_pulse) &&
4672 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4673 /* someone lost a heartbeat... */
4674 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4675 drv_pulse, mcp_pulse);
4676 }
4677 }
4678
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07004679 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004680 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004681
Eliezer Tamirf1410642008-02-28 11:51:50 -08004682timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004683 mod_timer(&bp->timer, jiffies + bp->current_interval);
4684}
4685
4686/* end of Statistics */
4687
4688/* nic init */
4689
4690/*
4691 * nic init service functions
4692 */
4693
/* Zero the U and C portions of a per-queue status block in CSEM fast
 * memory for the given port/status-block id.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4706
/* Initialize a per-queue host status block: tag its U and C sub-blocks
 * with @sb_id, program the hardware with the DMA address of each
 * sub-block, set the owning function, and write the HC "disable" flag
 * for every index (coalescing is configured later, see
 * bnx2x_update_coalesce()).  Ends with an IGU ack that enables
 * interrupts for this status block.
 *
 * @sb:      host status block to initialize
 * @mapping: DMA address of @sb
 * @sb_id:   status block id to program
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4751
/* Zero all four per-STORM sections of this function's default status
 * block in the respective STORM fast memories.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	/* both the U and C default sections live in CSEM */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4769
/* Initialize the default status block: the attention section plus one
 * default section per STORM (U, C, T, X).  For each section this tags
 * the host block with @sb_id, programs the section's DMA address and
 * owning function into the relevant STORM internal memory, and writes
 * the HC "disable" flag for every index.  Also caches the AEU attention
 * group masks and wires the attention message address into the HC.
 *
 * @def_sb:  host default status block to initialize
 * @mapping: DMA address of @def_sb
 * @sb_id:   status block id to program
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* cache the AEU enable masks of each dynamic attention group;
	 * groups are laid out 0x10 bytes apart, 4 dwords per group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* point the HC at the attention section of this block */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM (default U section is managed via CSTORM memory) */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* final ack with IGU_INT_ENABLE re-enables interrupts for this SB */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4885
/* Program interrupt coalescing for every queue's status block: write the
 * RX and TX timeout values (in units of 4*BNX2X_BTR ticks) and set the
 * corresponding HC disable flag whenever the computed timeout is zero.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
4915
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004916static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4917 struct bnx2x_fastpath *fp, int last)
4918{
4919 int i;
4920
4921 for (i = 0; i < last; i++) {
4922 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4923 struct sk_buff *skb = rx_buf->skb;
4924
4925 if (skb == NULL) {
4926 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4927 continue;
4928 }
4929
4930 if (fp->tpa_state[i] == BNX2X_TPA_START)
4931 pci_unmap_single(bp->pdev,
4932 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004933 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004934
4935 dev_kfree_skb(skb);
4936 rx_buf->skb = NULL;
4937 }
4938}
4939
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004940static void bnx2x_init_rx_rings(struct bnx2x *bp)
4941{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004942 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004943 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4944 ETH_MAX_AGGREGATION_QUEUES_E1H;
4945 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004946 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004947
Eilon Greenstein87942b42009-02-12 08:36:49 +00004948 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004949 DP(NETIF_MSG_IFUP,
4950 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004951
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004952 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004953
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004954 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004955 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004956
Eilon Greenstein32626232008-08-13 15:51:07 -07004957 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004958 fp->tpa_pool[i].skb =
4959 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4960 if (!fp->tpa_pool[i].skb) {
4961 BNX2X_ERR("Failed to allocate TPA "
4962 "skb pool for queue[%d] - "
4963 "disabling TPA on this "
4964 "queue!\n", j);
4965 bnx2x_free_tpa_pool(bp, fp, i);
4966 fp->disable_tpa = 1;
4967 break;
4968 }
4969 pci_unmap_addr_set((struct sw_rx_bd *)
4970 &bp->fp->tpa_pool[i],
4971 mapping, 0);
4972 fp->tpa_state[i] = BNX2X_TPA_STOP;
4973 }
4974 }
4975 }
4976
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004977 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004978 struct bnx2x_fastpath *fp = &bp->fp[j];
4979
4980 fp->rx_bd_cons = 0;
4981 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004982 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004983
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004984 /* "next page" elements initialization */
4985 /* SGE ring */
4986 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4987 struct eth_rx_sge *sge;
4988
4989 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4990 sge->addr_hi =
4991 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4992 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4993 sge->addr_lo =
4994 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996 }
4997
4998 bnx2x_init_sge_ring_bit_mask(fp);
4999
5000 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005001 for (i = 1; i <= NUM_RX_RINGS; i++) {
5002 struct eth_rx_bd *rx_bd;
5003
5004 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5005 rx_bd->addr_hi =
5006 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005007 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005008 rx_bd->addr_lo =
5009 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005010 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005011 }
5012
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005013 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005014 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5015 struct eth_rx_cqe_next_page *nextpg;
5016
5017 nextpg = (struct eth_rx_cqe_next_page *)
5018 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5019 nextpg->addr_hi =
5020 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005021 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005022 nextpg->addr_lo =
5023 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005024 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005025 }
5026
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005027 /* Allocate SGEs and initialize the ring elements */
5028 for (i = 0, ring_prod = 0;
5029 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005030
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005031 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5032 BNX2X_ERR("was only able to allocate "
5033 "%d rx sges\n", i);
5034 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5035 /* Cleanup already allocated elements */
5036 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005037 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005038 fp->disable_tpa = 1;
5039 ring_prod = 0;
5040 break;
5041 }
5042 ring_prod = NEXT_SGE_IDX(ring_prod);
5043 }
5044 fp->rx_sge_prod = ring_prod;
5045
5046 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005047 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005048 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005049 for (i = 0; i < bp->rx_ring_size; i++) {
5050 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5051 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005052 "%d rx skbs on queue[%d]\n", i, j);
5053 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005054 break;
5055 }
5056 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005057 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005058 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005059 }
5060
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005061 fp->rx_bd_prod = ring_prod;
5062 /* must not have more available CQEs than BDs */
5063 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5064 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005065 fp->rx_pkt = fp->rx_calls = 0;
5066
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005067 /* Warning!
5068 * this will generate an interrupt (to the TSTORM)
5069 * must only be done after chip is initialized
5070 */
5071 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5072 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005073 if (j != 0)
5074 continue;
5075
5076 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005077 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005078 U64_LO(fp->rx_comp_mapping));
5079 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005080 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005081 U64_HI(fp->rx_comp_mapping));
5082 }
5083}
5084
5085static void bnx2x_init_tx_ring(struct bnx2x *bp)
5086{
5087 int i, j;
5088
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005089 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005090 struct bnx2x_fastpath *fp = &bp->fp[j];
5091
5092 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005093 struct eth_tx_next_bd *tx_next_bd =
5094 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005095
Eilon Greensteinca003922009-08-12 22:53:28 -07005096 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005097 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005098 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005099 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005100 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005101 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005102 }
5103
Eilon Greensteinca003922009-08-12 22:53:28 -07005104 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5105 fp->tx_db.data.zero_fill1 = 0;
5106 fp->tx_db.data.prod = 0;
5107
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005108 fp->tx_pkt_prod = 0;
5109 fp->tx_pkt_cons = 0;
5110 fp->tx_bd_prod = 0;
5111 fp->tx_bd_cons = 0;
5112 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5113 fp->tx_pkt = 0;
5114 }
5115}
5116
/* Initialize the slow-path queue (SPQ) ring: reset the software
 * producer state and publish the ring's DMA base address and initial
 * producer index to the XSTORM fast memory for this function.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* full credit of pending slow-path entries, producer at the start */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* tell the XSTORM where the SPQ page lives (64-bit address,
	 * written as two 32-bit halves)
	 */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* publish the initial (zero) producer index */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5138
/* Fill in the per-connection hardware (CDU) context for every fastpath
 * queue: the USTORM Rx portion (BD/SGE page addresses, buffer sizes,
 * TPA enable) and the CSTORM/XSTORM Tx portion (Tx BD page address,
 * completion index, statistics id).  The context memory itself lives in
 * the slow-path DMA area (bnx2x_sp).
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		/* Rx buffer size and BD ring base (64-bit, split hi/lo) */
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA (LRO-like aggregation) uses the SGE ring;
			 * sge_buff_size is clamped to the 16-bit field
			 */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* SGEs needed for an MTU-sized packet, rounded up
			 * to a whole number of SGE pages
			 */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation/usage words for the aggregation contexts */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		/* Tx completions are reported via this status-block index */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* Tx BD ring base address (64-bit, split hi/lo) */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5214
/* Program the RSS indirection table in TSTORM memory: hash buckets are
 * spread round-robin over the client ids of this function's Rx queues.
 * Skipped entirely when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	/* each table entry is one byte: the client id to receive that bucket */
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
5230
/* Write the per-client TSTORM Rx configuration (MTU, statistics and
 * VLAN/E1HOV stripping flags) for every fastpath client.  The same
 * config is reused for each client with only the statistics counter id
 * differing per queue.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NOTE: "STATSITICS" is the firmware HSI header's own spelling */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* hardware VLAN tag stripping only when Rx is active and a VLAN
	 * group is registered
	 */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* the struct is written to storm memory as two dwords */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5263
/* Translate the driver's Rx mode (none/normal/allmulti/promisc) into
 * the TSTORM MAC filter configuration and the NIG LLH drop mask, and
 * write both to the chip.  Finally (re)writes the per-client config
 * whenever Rx is not completely disabled.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	/* unset filter bits default to 0 (= don't drop / don't accept-all);
	 * `mask` selects which client(s) of this function the setting hits
	 */
	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* LLH drop mask register is per port */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* copy the whole filter-config struct into TSTORM dword by dword */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5326
Eilon Greenstein471de712008-08-13 15:49:35 -07005327static void bnx2x_init_internal_common(struct bnx2x *bp)
5328{
5329 int i;
5330
5331 /* Zero this manually as its initialization is
5332 currently missing in the initTool */
5333 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5334 REG_WR(bp, BAR_USTRORM_INTMEM +
5335 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5336}
5337
/* Per-port internal memory initialization: program the BTR value into
 * each storm's host-coalescing area for this port.
 * NOTE(review): BNX2X_BTR presumably is the HC timeout/baud-tick value
 * expected by the firmware - confirm against the HSI definition.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* CSTORM has separate U and C producer areas; T and X have one each */
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5349
/* Per-function internal (storm) memory initialization: RSS/TPA common
 * config, initial Rx mode, per-client statistics reset, statistics
 * collection addresses, E1H multi-function settings, CQE page mapping
 * and aggregation limits, dropless flow control thresholds, and the
 * rate shaping / fairness (CMNG) context.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS configuration - only meaningful with more than one queue */
	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* in multi-function (E1HMF) mode the outer VLAN lives in the CAM */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* zero the per-client statistics in all three Rx-related storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* enable ethernet statistics collection in each storm
	 * (two dwords per storm)
	 */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* DMA address where the storms write their statistics queries */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H only: advertise multi-function mode and the outer VLAN tag */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* first CQE page base address (64-bit, split hi/lo) */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* low/high watermarks (in ring entries) for generating pause */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5564
Eilon Greenstein471de712008-08-13 15:49:35 -07005565static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5566{
5567 switch (load_code) {
5568 case FW_MSG_CODE_DRV_LOAD_COMMON:
5569 bnx2x_init_internal_common(bp);
5570 /* no break */
5571
5572 case FW_MSG_CODE_DRV_LOAD_PORT:
5573 bnx2x_init_internal_port(bp);
5574 /* no break */
5575
5576 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5577 bnx2x_init_internal_func(bp);
5578 break;
5579
5580 default:
5581 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5582 break;
5583 }
5584}
5585
/* Top-level NIC initialization after firmware load: set up fastpath
 * bookkeeping and status blocks, all rings, contexts and internal storm
 * memory, then enable interrupts.  The ordering here is deliberate -
 * interrupts must only be enabled after the chip is fully initialized.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, sb 0 belongs to the iSCSI/FCoE client */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5640
5641/* end of nic init */
5642
5643/*
5644 * gzip service functions
5645 */
5646
5647static int bnx2x_gunzip_init(struct bnx2x *bp)
5648{
5649 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5650 &bp->gunzip_mapping);
5651 if (bp->gunzip_buf == NULL)
5652 goto gunzip_nomem1;
5653
5654 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5655 if (bp->strm == NULL)
5656 goto gunzip_nomem2;
5657
5658 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5659 GFP_KERNEL);
5660 if (bp->strm->workspace == NULL)
5661 goto gunzip_nomem3;
5662
5663 return 0;
5664
5665gunzip_nomem3:
5666 kfree(bp->strm);
5667 bp->strm = NULL;
5668
5669gunzip_nomem2:
5670 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5671 bp->gunzip_mapping);
5672 bp->gunzip_buf = NULL;
5673
5674gunzip_nomem1:
Joe Perches7995c642010-02-17 15:01:52 +00005675 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005676 return -ENOMEM;
5677}
5678
5679static void bnx2x_gunzip_end(struct bnx2x *bp)
5680{
5681 kfree(bp->strm->workspace);
5682
5683 kfree(bp->strm);
5684 bp->strm = NULL;
5685
5686 if (bp->gunzip_buf) {
5687 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5688 bp->gunzip_mapping);
5689 bp->gunzip_buf = NULL;
5690 }
5691}
5692
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005693static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005694{
5695 int n, rc;
5696
5697 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005698 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5699 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005700 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005701 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005702
5703 n = 10;
5704
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005705#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005706
5707 if (zbuf[3] & FNAME)
5708 while ((zbuf[n++] != 0) && (n < len));
5709
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005710 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005711 bp->strm->avail_in = len - n;
5712 bp->strm->next_out = bp->gunzip_buf;
5713 bp->strm->avail_out = FW_BUF_SIZE;
5714
5715 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5716 if (rc != Z_OK)
5717 return rc;
5718
5719 rc = zlib_inflate(bp->strm, Z_FINISH);
5720 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00005721 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5722 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005723
5724 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5725 if (bp->gunzip_outlen & 0x3)
Joe Perches7995c642010-02-17 15:01:52 +00005726 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5727 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005728 bp->gunzip_outlen >>= 2;
5729
5730 zlib_inflateEnd(bp->strm);
5731
5732 if (rc == Z_STREAM_END)
5733 return 0;
5734
5735 return rc;
5736}
5737
5738/* nic load/unload */
5739
5740/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005741 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005742 */
5743
/* Send a single debug packet through the NIG loopback path.
 *
 * One packet is composed of two 3-word DMAE writes to
 * NIG_REG_DEBUG_PACKET_LB: the first group carries the SOP (start of
 * packet) flag, the second the EOP (end of packet) flag.  Used only by
 * bnx2x_int_mem_test() to push traffic through memories that are not
 * directly readable.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5761
/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets through the NIG loopback
 * and check the NIG/PRS statistics after each stage.
 *
 * Returns 0 on success or a negative stage number (-1..-4) identifying
 * the failing step.  On the success path BRB/PRS are reset and
 * re-initialized and the parser neighbor inputs are re-enabled.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* FPGA/emulation platforms run much slower than silicon, so all
	   poll timeouts below are scaled up by this factor */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5913
/* Unmask the attention interrupts of the HW blocks.
 *
 * Writing 0 to a *_INT_MASK register unmasks all attention bits of that
 * block.  The commented-out writes are deliberately left masked; PBF
 * keeps bits 3-4 masked (0x18).  PXP2 gets a different mask on FPGA.
 * NOTE(review): the exact meaning of the PXP2 mask values (0x580000 vs
 * 0x480000) is not derivable from this file - see the register spec.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5952
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005953
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005954static void bnx2x_reset_common(struct bnx2x *bp)
5955{
5956 /* reset_common */
5957 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5958 0xd3ffff7f);
5959 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5960}
5961
Eilon Greenstein573f2032009-08-12 08:24:14 +00005962static void bnx2x_init_pxp(struct bnx2x *bp)
5963{
5964 u16 devctl;
5965 int r_order, w_order;
5966
5967 pci_read_config_word(bp->pdev,
5968 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5969 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5970 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5971 if (bp->mrrs == -1)
5972 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5973 else {
5974 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5975 r_order = bp->mrrs;
5976 }
5977
5978 bnx2x_init_pxp_arb(bp, r_order, w_order);
5979}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005980
/* Decide from the shared-memory HW config whether fan-failure detection
 * is required for this board and, if so, arm SPIO5 as an active-low
 * input whose event raises an attention interrupt towards the IGU
 * (the SPIO5 attention is checked via bnx2x_attn_int_deasserted0()).
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	/* explicitly enabled in the config */
	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			/* required if ANY port carries one of these PHYs */
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
6033
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006034static int bnx2x_init_common(struct bnx2x *bp)
6035{
6036 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006037#ifdef BCM_CNIC
6038 u32 wb_write[2];
6039#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006040
6041 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6042
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006043 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6046
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006047 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006048 if (CHIP_IS_E1H(bp))
6049 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6050
6051 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6052 msleep(30);
6053 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6054
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006055 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006056 if (CHIP_IS_E1(bp)) {
6057 /* enable HW interrupt from PXP on USDM overflow
6058 bit 16 on INT_MASK_0 */
6059 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006060 }
6061
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006062 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006063 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006064
6065#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006066 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6067 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6068 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6069 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6070 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006071 /* make sure this value is 0 */
6072 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006073
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006074/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6075 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6076 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6077 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6078 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006079#endif
6080
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006081 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006082#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006083 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6084 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6085 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006086#endif
6087
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006088 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6089 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006090
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006091 /* let the HW do it's magic ... */
6092 msleep(100);
6093 /* finish PXP init */
6094 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6095 if (val != 1) {
6096 BNX2X_ERR("PXP2 CFG failed\n");
6097 return -EBUSY;
6098 }
6099 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6100 if (val != 1) {
6101 BNX2X_ERR("PXP2 RD_INIT failed\n");
6102 return -EBUSY;
6103 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006104
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006105 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6106 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006107
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006108 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006109
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006110 /* clean the DMAE memory */
6111 bp->dmae_ready = 1;
6112 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006113
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006114 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6115 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6116 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6117 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006118
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006119 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6120 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6121 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6122 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6123
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006124 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006125
6126#ifdef BCM_CNIC
6127 wb_write[0] = 0;
6128 wb_write[1] = 0;
6129 for (i = 0; i < 64; i++) {
6130 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6131 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6132
6133 if (CHIP_IS_E1H(bp)) {
6134 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6136 wb_write, 2);
6137 }
6138 }
6139#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006140 /* soft reset pulse */
6141 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6142 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006143
Michael Chan37b091b2009-10-10 13:46:55 +00006144#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006145 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006146#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006147
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006148 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006149 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6150 if (!CHIP_REV_IS_SLOW(bp)) {
6151 /* enable hw interrupt from doorbell Q */
6152 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6153 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006154
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006155 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6156 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006157 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006158#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006159 /* set NIC mode */
6160 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006161#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006162 if (CHIP_IS_E1H(bp))
6163 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006164
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006165 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6166 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6167 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6168 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006169
Eilon Greensteinca003922009-08-12 22:53:28 -07006170 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6171 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6172 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6173 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006174
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006175 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6176 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6177 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6178 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006179
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006180 /* sync semi rtc */
6181 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6182 0x80000000);
6183 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6184 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006185
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006186 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6187 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6188 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006189
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006190 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6191 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6192 REG_WR(bp, i, 0xc0cac01a);
6193 /* TODO: replace with something meaningful */
6194 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006195 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006196#ifdef BCM_CNIC
6197 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6198 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6199 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6200 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6201 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6207#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006208 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006209
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006210 if (sizeof(union cdu_context) != 1024)
6211 /* we currently assume that a context is 1024 bytes */
Joe Perches7995c642010-02-17 15:01:52 +00006212 pr_alert("please adjust the size of cdu_context(%ld)\n",
6213 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006214
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006215 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006216 val = (4 << 24) + (0 << 12) + 1024;
6217 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006218
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006219 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006220 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006221 /* enable context validation interrupt from CFC */
6222 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6223
6224 /* set the thresholds to prevent CFC/CDU race */
6225 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006226
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006227 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6228 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006229
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006230 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006231 /* Reset PCIE errors for debug */
6232 REG_WR(bp, 0x2814, 0xffffffff);
6233 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006234
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006235 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006236 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006237 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006238 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006239
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006240 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006241 if (CHIP_IS_E1H(bp)) {
6242 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6243 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6244 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006245
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006246 if (CHIP_REV_IS_SLOW(bp))
6247 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006248
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006249 /* finish CFC init */
6250 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6251 if (val != 1) {
6252 BNX2X_ERR("CFC LL_INIT failed\n");
6253 return -EBUSY;
6254 }
6255 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6256 if (val != 1) {
6257 BNX2X_ERR("CFC AC_INIT failed\n");
6258 return -EBUSY;
6259 }
6260 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6261 if (val != 1) {
6262 BNX2X_ERR("CFC CAM_INIT failed\n");
6263 return -EBUSY;
6264 }
6265 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006266
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006267 /* read NIG statistic
6268 to see if this is our first up since powerup */
6269 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6270 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006271
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006272 /* do internal memory self test */
6273 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6274 BNX2X_ERR("internal mem self test failed\n");
6275 return -EBUSY;
6276 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006277
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006278 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006279 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6280 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6281 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006283 bp->port.need_hw_lock = 1;
6284 break;
6285
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006286 default:
6287 break;
6288 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006289
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006290 bnx2x_setup_fan_failure_detection(bp);
6291
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006292 /* clear PXP2 attentions */
6293 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006294
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006295 enable_blocks_attention(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006296
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006297 if (!BP_NOMCP(bp)) {
6298 bnx2x_acquire_phy_lock(bp);
6299 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6300 bnx2x_release_phy_lock(bp);
6301 } else
6302 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6303
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006304 return 0;
6305}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006306
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006307static int bnx2x_init_port(struct bnx2x *bp)
6308{
6309 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006310 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006311 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006312 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006313
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006314 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6315
6316 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006317
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006318 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006319 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006320
6321 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6322 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6323 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006324 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006325
Michael Chan37b091b2009-10-10 13:46:55 +00006326#ifdef BCM_CNIC
6327 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006328
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006329 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006330 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6331 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006332#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006333 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006334
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006335 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006336 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6337 /* no pause for emulation and FPGA */
6338 low = 0;
6339 high = 513;
6340 } else {
6341 if (IS_E1HMF(bp))
6342 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6343 else if (bp->dev->mtu > 4096) {
6344 if (bp->flags & ONE_PORT_FLAG)
6345 low = 160;
6346 else {
6347 val = bp->dev->mtu;
6348 /* (24*1024 + val*4)/256 */
6349 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6350 }
6351 } else
6352 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6353 high = low + 56; /* 14*1024/256 */
6354 }
6355 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6356 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6357
6358
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006359 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006360
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006361 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006362 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006363 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006364 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006365
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006366 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6367 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6368 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6369 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006370
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006371 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006372 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006373
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006374 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006375
6376 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006377 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006378
6379 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006380 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006381 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006382 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006383
6384 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006385 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006386 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006387 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006388
Michael Chan37b091b2009-10-10 13:46:55 +00006389#ifdef BCM_CNIC
6390 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006391#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006392 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006393 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006394
6395 if (CHIP_IS_E1(bp)) {
6396 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6397 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6398 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006399 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006400
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006401 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006402 /* init aeu_mask_attn_func_0/1:
6403 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6404 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6405 * bits 4-7 are used for "per vn group attention" */
6406 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6407 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6408
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006409 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006410 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006411 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006412 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006413 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006414
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006415 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006416
6417 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6418
6419 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006420 /* 0x2 disable e1hov, 0x1 enable */
6421 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6422 (IS_E1HMF(bp) ? 0x1 : 0x2));
6423
Eilon Greenstein1c063282009-02-12 08:36:43 +00006424 {
6425 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6426 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6427 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6428 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006429 }
6430
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006431 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006432 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006433
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006434 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6436 {
6437 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6438
6439 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6440 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6441
6442 /* The GPIO should be swapped if the swap register is
6443 set and active */
6444 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6445 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6446
6447 /* Select function upon port-swap configuration */
6448 if (port == 0) {
6449 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6450 aeu_gpio_mask = (swap_val && swap_override) ?
6451 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6452 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6453 } else {
6454 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6455 aeu_gpio_mask = (swap_val && swap_override) ?
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6457 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6458 }
6459 val = REG_RD(bp, offset);
6460 /* add GPIO3 to group */
6461 val |= aeu_gpio_mask;
6462 REG_WR(bp, offset, val);
6463 }
6464 break;
6465
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006466 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006468 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006469 {
6470 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6471 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6472 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006473 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006474 REG_WR(bp, reg_addr, val);
6475 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006476 break;
6477
6478 default:
6479 break;
6480 }
6481
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006482 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006483
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006484 return 0;
6485}
6486
6487#define ILT_PER_FUNC (768/2)
6488#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6489/* the phys address is shifted right 12 bits and has an added
6490 1=valid bit added to the 53rd bit
6491 then since this is a wide register(TM)
6492 we split it into two 32 bit writes
6493 */
6494#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6495#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6496#define PXP_ONE_ILT(x) (((x) << 10) | x)
6497#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6498
Michael Chan37b091b2009-10-10 13:46:55 +00006499#ifdef BCM_CNIC
6500#define CNIC_ILT_LINES 127
6501#define CNIC_CTX_PER_ILT 16
6502#else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006503#define CNIC_ILT_LINES 0
Michael Chan37b091b2009-10-10 13:46:55 +00006504#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006505
6506static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6507{
6508 int reg;
6509
6510 if (CHIP_IS_E1H(bp))
6511 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6512 else /* E1 */
6513 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6514
6515 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6516}
6517
/* Per-function HW init stage: enables MSI reconfigure, programs the ILT
 * lines owned by this function (CDU context; with CNIC also timers, QM
 * and searcher tables), runs the per-function init blocks on E1H and
 * clears latched PCIE error registers.  Runs with DMAE usable (the
 * caller, bnx2x_init_hw(), sets bp->dmae_ready first).  Returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* map the slowpath context; E1H uses first/last ILT registers,
	 * E1 a single per-function range register */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* skip past the CDU lines, then program timers, QM and
	 * searcher (T1) ILT lines in sequence */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function stage of the HW init blocks */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6614
/* Top-level HW init dispatcher.  @load_code from the MCP tells this
 * driver instance how much of the chip it must bring up: COMMON falls
 * through to PORT which falls through to FUNCTION - the switch cases
 * cascade on purpose (note the "no break" markers).  Returns 0 or the
 * error from the first failing stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is not usable before the common stage has run */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the driver pulse sequence from shared memory */
		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* one extra status block for CNIC (i is one past the last queue) */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6676
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006677static void bnx2x_free_mem(struct bnx2x *bp)
6678{
6679
6680#define BNX2X_PCI_FREE(x, y, size) \
6681 do { \
6682 if (x) { \
6683 pci_free_consistent(bp->pdev, size, x, y); \
6684 x = NULL; \
6685 y = 0; \
6686 } \
6687 } while (0)
6688
6689#define BNX2X_FREE(x) \
6690 do { \
6691 if (x) { \
6692 vfree(x); \
6693 x = NULL; \
6694 } \
6695 } while (0)
6696
6697 int i;
6698
6699 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006700 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006701 for_each_queue(bp, i) {
6702
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006703 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006704 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6705 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006706 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006707 }
6708 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006709 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006710
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006711 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006712 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6713 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6714 bnx2x_fp(bp, i, rx_desc_mapping),
6715 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6716
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6718 bnx2x_fp(bp, i, rx_comp_mapping),
6719 sizeof(struct eth_fast_path_rx_cqe) *
6720 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006721
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006722 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006723 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006724 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6725 bnx2x_fp(bp, i, rx_sge_mapping),
6726 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6727 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006728 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006729 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006730
6731 /* fastpath tx rings: tx_buf tx_desc */
6732 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6733 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6734 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006735 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006736 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006737 /* end of fastpath */
6738
6739 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006740 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006741
6742 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006743 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006744
Michael Chan37b091b2009-10-10 13:46:55 +00006745#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006746 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6747 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6748 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6749 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006750 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6751 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006752#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006753 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006754
6755#undef BNX2X_PCI_FREE
6756#undef BNX2X_KFREE
6757}
6758
/* Allocate all driver memory: per-queue status blocks and rings
 * (DMA-consistent or vmalloc'ed), default status block, slowpath area,
 * CNIC tables when built in, and the slowpath (SPQ) ring.  On any
 * failure the helper macros jump to alloc_mem_err, which frees whatever
 * was already allocated and returns -ENOMEM.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-consistent, zeroed allocation; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* vmalloc'ed, zeroed allocation; jumps to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* link each 64-byte element to the physical address of the next */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6864
6865static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6866{
6867 int i;
6868
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006869 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006870 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
6872 u16 bd_cons = fp->tx_bd_cons;
6873 u16 sw_prod = fp->tx_pkt_prod;
6874 u16 sw_cons = fp->tx_pkt_cons;
6875
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006876 while (sw_cons != sw_prod) {
6877 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6878 sw_cons++;
6879 }
6880 }
6881}
6882
6883static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6884{
6885 int i, j;
6886
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006887 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006888 struct bnx2x_fastpath *fp = &bp->fp[j];
6889
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006890 for (i = 0; i < NUM_RX_BD; i++) {
6891 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6892 struct sk_buff *skb = rx_buf->skb;
6893
6894 if (skb == NULL)
6895 continue;
6896
6897 pci_unmap_single(bp->pdev,
6898 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006899 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006900
6901 rx_buf->skb = NULL;
6902 dev_kfree_skb(skb);
6903 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006904 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006905 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6906 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006907 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006908 }
6909}
6910
/* Free all driver-owned Tx and Rx skbs prior to ring teardown. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6916
6917static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6918{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006919 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006920
6921 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006922 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006923 bp->msix_table[0].vector);
6924
Michael Chan37b091b2009-10-10 13:46:55 +00006925#ifdef BCM_CNIC
6926 offset++;
6927#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006928 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006929 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006930 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006931 bnx2x_fp(bp, i, state));
6932
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006933 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006934 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006935}
6936
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006937static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006938{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006939 if (bp->flags & USING_MSIX_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006940 if (!disable_only)
6941 bnx2x_free_msix_irqs(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006942 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943 bp->flags &= ~USING_MSIX_FLAG;
6944
Eilon Greenstein8badd272009-02-12 08:36:15 +00006945 } else if (bp->flags & USING_MSI_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006946 if (!disable_only)
6947 free_irq(bp->pdev->irq, bp->dev);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006948 pci_disable_msi(bp->pdev);
6949 bp->flags &= ~USING_MSI_FLAG;
6950
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006951 } else if (!disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006952 free_irq(bp->pdev->irq, bp->dev);
6953}
6954
6955static int bnx2x_enable_msix(struct bnx2x *bp)
6956{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006957 int i, rc, offset = 1;
6958 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006959
Eilon Greenstein8badd272009-02-12 08:36:15 +00006960 bp->msix_table[0].entry = igu_vec;
6961 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006962
Michael Chan37b091b2009-10-10 13:46:55 +00006963#ifdef BCM_CNIC
6964 igu_vec = BP_L_ID(bp) + offset;
6965 bp->msix_table[1].entry = igu_vec;
6966 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6967 offset++;
6968#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006969 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006970 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006971 bp->msix_table[i + offset].entry = igu_vec;
6972 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6973 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006974 }
6975
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006976 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006977 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006978 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006979 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6980 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006981 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006982
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006983 bp->flags |= USING_MSIX_FLAG;
6984
6985 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006986}
6987
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006988static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6989{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006990 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006991
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006992 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6993 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006994 if (rc) {
6995 BNX2X_ERR("request sp irq failed\n");
6996 return -EBUSY;
6997 }
6998
Michael Chan37b091b2009-10-10 13:46:55 +00006999#ifdef BCM_CNIC
7000 offset++;
7001#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007002 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007003 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007004 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7005 bp->dev->name, i);
Eilon Greensteinca003922009-08-12 22:53:28 -07007006
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007007 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007008 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007009 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007010 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007011 bnx2x_free_msix_irqs(bp);
7012 return -EBUSY;
7013 }
7014
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007015 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007016 }
7017
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007018 i = BNX2X_NUM_QUEUES(bp);
Joe Perches7995c642010-02-17 15:01:52 +00007019 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
7020 bp->msix_table[0].vector,
7021 0, bp->msix_table[offset].vector,
7022 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007023
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007024 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007025}
7026
Eilon Greenstein8badd272009-02-12 08:36:15 +00007027static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007028{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007029 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007030
Eilon Greenstein8badd272009-02-12 08:36:15 +00007031 rc = pci_enable_msi(bp->pdev);
7032 if (rc) {
7033 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7034 return -1;
7035 }
7036 bp->flags |= USING_MSI_FLAG;
7037
7038 return 0;
7039}
7040
7041static int bnx2x_req_irq(struct bnx2x *bp)
7042{
7043 unsigned long flags;
7044 int rc;
7045
7046 if (bp->flags & USING_MSI_FLAG)
7047 flags = 0;
7048 else
7049 flags = IRQF_SHARED;
7050
7051 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007052 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007053 if (!rc)
7054 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7055
7056 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007057}
7058
Yitchak Gertner65abd742008-08-25 15:26:24 -07007059static void bnx2x_napi_enable(struct bnx2x *bp)
7060{
7061 int i;
7062
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007063 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007064 napi_enable(&bnx2x_fp(bp, i, napi));
7065}
7066
7067static void bnx2x_napi_disable(struct bnx2x *bp)
7068{
7069 int i;
7070
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007071 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007072 napi_disable(&bnx2x_fp(bp, i, napi));
7073}
7074
/* Re-enable the datapath after bnx2x_netif_stop().  bp->intr_sem counts
 * nested stop requests; only the call that brings it back to zero
 * actually re-enables NAPI, interrupts and the Tx queues. */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* wake Tx only when fully operational */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7091
/* Quiesce the datapath: mask and synchronize interrupts (optionally at
 * the HW level too, per @disable_hw), stop NAPI polling, then freeze
 * all Tx queues.  Paired with bnx2x_netif_start(). */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7098
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007099/*
7100 * Init service functions
7101 */
7102
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC (6 bytes)
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 *
 * Fills the slowpath mac_config command buffer and posts a SET_MAC ramrod.
 * Completion is not waited for here - callers poll bp->set_mac_pending.
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	/* one unicast entry, plus an optional broadcast entry */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - byte-swapped 16-bit loads of the raw MAC buffer
	 * (the CAM expects the address in this swapped form) */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast - second table entry is ff:ff:ff:ff:ff:ff */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	/* hand the prepared command to FW via the SET_MAC ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7174
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC (6 bytes)
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 *
 * E1H variant of the CAM update: a single entry that also carries the
 * outer-VLAN (e1hov) tag. Posts a SET_MAC ramrod; completion is not
 * waited for here - callers poll bp->set_mac_pending.
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	/* the slowpath buffer is shared with the E1 layout; reinterpret it
	 * with the E1H command structure */
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - byte-swapped 16-bit loads of the raw MAC buffer */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing is encoded via the action-type flag */
		config->config_table[0].flags =
			MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	/* hand the prepared command to FW via the SET_MAC ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7222
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007223static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7224 int *state_p, int poll)
7225{
7226 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007227 int cnt = 5000;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007228
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007229 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7230 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007231
7232 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007233 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007234 if (poll) {
7235 bnx2x_rx_int(bp->fp, 10);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007236 /* if index is different from 0
7237 * the reply for some commands will
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007238 * be on the non default queue
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007239 */
7240 if (idx)
7241 bnx2x_rx_int(&bp->fp[idx], 10);
7242 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007243
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007244 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007245 if (*state_p == state) {
7246#ifdef BNX2X_STOP_ON_ERROR
7247 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7248#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007249 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007250 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007251
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007252 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00007253
7254 if (bp->panic)
7255 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007256 }
7257
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007258 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007259 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7260 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007261#ifdef BNX2X_STOP_ON_ERROR
7262 bnx2x_panic();
7263#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007264
Eliezer Tamir49d66772008-02-28 11:53:13 -08007265 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007266}
7267
Michael Chane665bfd2009-10-10 13:46:54 +00007268static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7269{
7270 bp->set_mac_pending++;
7271 smp_wmb();
7272
7273 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7274 (1 << bp->fp->cl_id), BP_FUNC(bp));
7275
7276 /* Wait for a completion */
7277 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7278}
7279
7280static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7281{
7282 bp->set_mac_pending++;
7283 smp_wmb();
7284
7285 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7286 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7287 1);
7288
7289 /* Wait for a completion */
7290 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7291}
7292
Michael Chan993ac7b2009-10-10 13:46:56 +00007293#ifdef BCM_CNIC
7294/**
7295 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7296 * MAC(s). This function will wait until the ramdord completion
7297 * returns.
7298 *
7299 * @param bp driver handle
7300 * @param set set or clear the CAM entry
7301 *
7302 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7303 */
7304static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7305{
7306 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7307
7308 bp->set_mac_pending++;
7309 smp_wmb();
7310
7311 /* Send a SET_MAC ramrod */
7312 if (CHIP_IS_E1(bp))
7313 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7314 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7315 1);
7316 else
7317 /* CAM allocation for E1H
7318 * unicasts: by func number
7319 * multicast: 20+FUNC*20, 20 each
7320 */
7321 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7322 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7323
7324 /* Wait for a completion when setting */
7325 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7326
7327 return 0;
7328}
7329#endif
7330
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007331static int bnx2x_setup_leading(struct bnx2x *bp)
7332{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007333 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007334
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007335 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007336 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007337
7338 /* SETUP ramrod */
7339 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7340
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007341 /* Wait for completion */
7342 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007343
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007344 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007345}
7346
7347static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7348{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007349 struct bnx2x_fastpath *fp = &bp->fp[index];
7350
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007351 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007352 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007353
Eliezer Tamir228241e2008-02-28 11:56:57 -08007354 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007355 fp->state = BNX2X_FP_STATE_OPENING;
7356 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7357 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007358
7359 /* Wait for completion */
7360 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007361 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007362}
7363
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007364static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007365
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007366static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007367{
Eilon Greensteinca003922009-08-12 22:53:28 -07007368
7369 switch (bp->multi_mode) {
7370 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007371 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007372 break;
7373
7374 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007375 if (num_queues)
7376 bp->num_queues = min_t(u32, num_queues,
7377 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007378 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007379 bp->num_queues = min_t(u32, num_online_cpus(),
7380 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007381 break;
7382
7383
7384 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007385 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007386 break;
7387 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007388}
7389
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007390static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007391{
7392 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007393
Eilon Greenstein8badd272009-02-12 08:36:15 +00007394 switch (int_mode) {
7395 case INT_MODE_INTx:
7396 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007397 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007398 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007399 break;
7400
7401 case INT_MODE_MSIX:
7402 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007403 /* Set number of queues according to bp->multi_mode value */
7404 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007405
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007406 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7407 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007408
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007409 /* if we can't use MSI-X we only need one fp,
7410 * so try to enable MSI-X with the requested number of fp's
7411 * and fallback to MSI or legacy INTx with one fp
7412 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007413 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007414 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007415 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007416 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007417 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007418 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007420 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007421}
7422
Michael Chan993ac7b2009-10-10 13:46:56 +00007423#ifdef BCM_CNIC
7424static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7425static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7426#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007427
/* Full NIC bring-up: allocate resources, request IRQs, negotiate the load
 * type with the MCP (management firmware), init HW and FW state, open the
 * connections and start the data path.
 *
 * must be called with rtnl_lock
 *
 * @load_mode: LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG - controls how the TX
 *             queues and RX filter are (re)started at the end.
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is unwound through the load_error* goto ladder at the bottom.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* decide queue count and try to set up MSI-X; rc is consulted again
	 * below in the non-MSI-X branch to detect an -ENOMEM fallback */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	/* TPA (LRO aggregation) per-queue enable mirrors the global flag */
	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate its decision with local load counters */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function on the port/chip becomes the port-management
	 * function (PMF) */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		/* tell the MCP the load attempt is over before unwinding */
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC capabilities in shmem2, when present */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		/* debug build: freeze state for inspection instead of
		 * unwinding */
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		/* program the primary MAC for this chip family */
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

	/* error unwinding: each label releases everything acquired after
	 * the previous one */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7677
7678static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7679{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007680 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007681 int rc;
7682
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007683 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007684 fp->state = BNX2X_FP_STATE_HALTING;
7685 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007686
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007687 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007688 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007689 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007690 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007691 return rc;
7692
7693 /* delete cfc entry */
7694 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7695
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007696 /* Wait for completion */
7697 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007698 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007699 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007700}
7701
/* Tear down the leading (default) connection: HALT it, then post
 * PORT_DELETE and poll the default status block's slowpath producer for
 * the completion.  A PORT_DELETE timeout is logged but only returns -EBUSY
 * - the chip is about to be reset anyway.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7751
/* Reset this PCI function's portion of the chip: mask its IGU edges,
 * (with CNIC) stop the timers block and wait for its scan to drain,
 * and clear the function's ILT entries.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7780
/* Per-port HW quiesce: mask NIG interrupts, block all BRB receive paths,
 * mask AEU attentions, then (after a 100ms grace period) verify the BRB
 * has drained.  A non-empty BRB is only logged, not treated as fatal.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU: mask all attentions from this port */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight traffic time to drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7806
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007807static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7808{
7809 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7810 BP_FUNC(bp), reset_code);
7811
7812 switch (reset_code) {
7813 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7814 bnx2x_reset_port(bp);
7815 bnx2x_reset_func(bp);
7816 bnx2x_reset_common(bp);
7817 break;
7818
7819 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7820 bnx2x_reset_port(bp);
7821 bnx2x_reset_func(bp);
7822 break;
7823
7824 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7825 bnx2x_reset_func(bp);
7826 break;
7827
7828 default:
7829 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7830 break;
7831 }
7832}
7833
/* must be called with rtnl_lock */
/*
 * Tear down the NIC: stop RX, IRQs and the timer, drain TX fastpaths,
 * invalidate MAC/CAM configuration, negotiate a reset_code with the MCP
 * (or emulate it via load_count when no MCP), reset the chip and free
 * all driver resources.
 *
 * @unload_mode: UNLOAD_NORMAL disables WoL; otherwise WoL handling
 *               depends on NO_WOL_FLAG and bp->wol.
 * Returns 0 on success, -EBUSY only under BNX2X_STOP_ON_ERROR.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* keep the FW pulse alive so the MCP does not declare us dead */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete (poll up to ~1000 x 1ms
	 * per queue, servicing completions ourselves via bnx2x_tx_int) */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear the unicast MAC and invalidate every CAM entry
		 * of the multicast configuration via a SET_MAC ramrod */
		struct mac_configuration_cmd *config =
					    bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();	/* publish set_mac_pending before the ramrod */

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* E1H: disable the LLH function, clear MAC and MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* Choose the unload request to send to the MCP based on WoL policy */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

	/* NOTE(review): the success path deliberately falls through into
	 * unload_error, and rc is never propagated past this point — the
	 * function returns 0 even when stop_multi/stop_leading failed. */
unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		/* no MCP: emulate its bookkeeping with local load counters */
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8017
/* Workqueue handler that recovers the NIC by a full unload/load cycle
 * under rtnl_lock.  When built with BNX2X_STOP_ON_ERROR the reset is
 * intentionally skipped so the failed state is preserved for debugging.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	/* the device may have been closed between scheduling and now */
	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
8040
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008041/* end of nic load/unload */
8042
8043/* ethtool_ops */
8044
8045/*
8046 * Init service functions
8047 */
8048
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008049static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8050{
8051 switch (func) {
8052 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8053 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8054 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8055 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8056 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8057 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8058 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8059 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8060 default:
8061 BNX2X_ERR("Unsupported function index: %d\n", func);
8062 return (u32)(-1);
8063 }
8064}
8065
/* On E1H, disable HC interrupts while temporarily pretending to be
 * function 0 via the PGL pretend register, then restore the original
 * function.  Each pretend write is verified by a read-back (BUG() on
 * mismatch), and mmiowb() flushes outstanding writes around the switch.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8098
/* Disable interrupts during UNDI takeover: E1 can be disabled directly,
 * E1H has to go through the pretend-to-be-function-0 sequence.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
8106
/* Probe-time takeover from a pre-OS UNDI driver: detect UNDI by its CID
 * offset signature (0x7), send unload requests to the MCP for both ports
 * if needed, quiesce traffic, reset the device (preserving NIG port-swap
 * strap values) and restore our own func/fw_seq.  The whole detection is
 * serialized under HW_LOCK_RESOURCE_UNDI.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8205
/* Probe-time discovery of chip-wide (port/function independent) info:
 * chip id, single/dual port, flash size, shmem bases, bootcode version,
 * LED mode, feature flags and WoL capability.  Sets NO_MCP_FLAG and
 * returns early when the shared-memory base indicates no active MCP.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* single-port detection: odd chip_id, or the 0x2874 strap bits
	 * (E1: any of them set; E1H: all of 0x55 set) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* a shmem base outside [0xA0000, 0xC0000) means the MCP is absent */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		/* WoL capability is taken from the PCI PM capability */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number: four consecutive 32-bit words from shared HW config */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
8303
/* Populate bp->port.supported with the link modes the board can offer,
 * based on the switch configuration (1G SerDes vs 10G XGXS) and the
 * external PHY type from NVRAM, then mask the result down to whatever
 * the NVRAM speed_cap_mask allows.  Also reads the PHY MDIO address
 * from the NIG.  On a bad NVRAM config the function logs and returns
 * without touching the capability masking.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY MDIO address comes from the NIG */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY MDIO address comes from the NIG */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8540
/* Translate the NVRAM-provided port link configuration into the requested
 * link parameters (bp->link_params.req_line_speed / req_duplex /
 * req_flow_ctrl) and the ethtool advertising mask (bp->port.advertising).
 *
 * On an invalid NVRAM combination (a speed requested that the port does not
 * support) an error is logged and the function returns early, leaving the
 * previously-set request fields untouched.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* default duplex; the half-duplex cases below override this */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			/* advertise everything the port supports */
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			/* BCM8705/8706 external PHYs cannot autoneg;
			 * fall back to a forced 10G fibre setup
			 */
			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all three 10G media variants map to the same forced-10G request */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown speed selector: log it but fall back to autoneg
		 * instead of bailing out
		 */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	/* AUTO flow control requires autoneg capability; downgrade to NONE
	 * when the port cannot autonegotiate
	 */
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8703
Michael Chane665bfd2009-10-10 13:46:54 +00008704static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705{
8706 mac_hi = cpu_to_be16(mac_hi);
8707 mac_lo = cpu_to_be32(mac_lo);
8708 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710}
8711
/* Read the per-port hardware configuration from the shared-memory (SHMEM)
 * region into bp->link_params and bp->port: lane/PHY config, speed
 * capability mask, link config, XGXS lane equalization values, default WoL
 * state, the MDIO PHY address and the port MAC address(es).  Also derives
 * the supported/requested link settings via the two helpers called below.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* treat the NOC variant as a plain BCM8727 from here on,
		 * remembering the difference in a feature flag
		 */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		/* each 32-bit SHMEM word packs two 16-bit lane values */
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
	/* FAILURE/NOT_CONN: leave bp->mdio.prtad unchanged */

	/* port MAC address lives in SHMEM as a 16-bit upper half and a
	 * 32-bit lower half; expand it into network byte order
	 */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC for the iSCSI offload function */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008803
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008804static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8805{
8806 int func = BP_FUNC(bp);
8807 u32 val, val2;
8808 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008809
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008810 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008811
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008812 bp->e1hov = 0;
8813 bp->e1hmf = 0;
8814 if (CHIP_IS_E1H(bp)) {
8815 bp->mf_config =
8816 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008817
Eilon Greenstein2691d512009-08-12 08:22:08 +00008818 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008819 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008821 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008822 BNX2X_DEV_INFO("%s function mode\n",
8823 IS_E1HMF(bp) ? "multi" : "single");
8824
8825 if (IS_E1HMF(bp)) {
8826 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8827 e1hov_tag) &
8828 FUNC_MF_CFG_E1HOV_TAG_MASK);
8829 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8830 bp->e1hov = val;
8831 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8832 "(0x%04x)\n",
8833 func, bp->e1hov, bp->e1hov);
8834 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008835 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8836 " aborting\n", func);
8837 rc = -EPERM;
8838 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008839 } else {
8840 if (BP_E1HVN(bp)) {
8841 BNX2X_ERR("!!! VN %d in single function mode,"
8842 " aborting\n", BP_E1HVN(bp));
8843 rc = -EPERM;
8844 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008845 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008846 }
8847
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008848 if (!BP_NOMCP(bp)) {
8849 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008850
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008851 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852 DRV_MSG_SEQ_NUMBER_MASK);
8853 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8854 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008855
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008856 if (IS_E1HMF(bp)) {
8857 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8859 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8866 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8868 ETH_ALEN);
8869 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8870 ETH_ALEN);
8871 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008872
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008873 return rc;
8874 }
8875
8876 if (BP_NOMCP(bp)) {
8877 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07008878 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008879 random_ether_addr(bp->dev->dev_addr);
8880 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8881 }
8882
8883 return rc;
8884}
8885
/* One-time driver-state initialization for a device: locks, work items,
 * hardware info discovery, module-parameter-derived settings (multi-queue
 * mode, TPA/LRO, dropless flow control, MRRS), ring sizes, coalescing
 * ticks and the periodic timer.
 *
 * Returns the result of bnx2x_get_hwinfo() (0 or a negative error code);
 * note that initialization continues even when hwinfo reports an error.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS needs MSI-X; fall back to a single queue otherwise */
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not available on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* the "poll" module parameter overrides the default timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8961
8962/*
8963 * ethtool service functions
8964 */
8965
8966/* All ethtool functions called with rtnl_lock */
8967
/* ethtool get_settings handler (called under rtnl_lock): report the
 * supported/advertised masks, current speed/duplex (capped by the
 * per-VN max bandwidth in E1H multi-function mode), the port/media type
 * derived from the external PHY, the MDIO PHY address and the autoneg
 * state.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* MF config stores bandwidth in units of 100 Mbps */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* link down (or function disabled): speed/duplex unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		/* map the external PHY model to an ethtool media type */
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
9048
9049static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9050{
9051 struct bnx2x *bp = netdev_priv(dev);
9052 u32 advertising;
9053
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009054 if (IS_E1HMF(bp))
9055 return 0;
9056
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009057 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9058 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9059 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9060 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9061 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9062 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9063 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9064
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009065 if (cmd->autoneg == AUTONEG_ENABLE) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009066 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9067 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009068 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009069 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009070
9071 /* advertise the requested speed and duplex if supported */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009072 cmd->advertising &= bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009073
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009074 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9075 bp->link_params.req_duplex = DUPLEX_FULL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009076 bp->port.advertising |= (ADVERTISED_Autoneg |
9077 cmd->advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009078
9079 } else { /* forced speed */
9080 /* advertise the requested speed and duplex if supported */
9081 switch (cmd->speed) {
9082 case SPEED_10:
9083 if (cmd->duplex == DUPLEX_FULL) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009084 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009085 SUPPORTED_10baseT_Full)) {
9086 DP(NETIF_MSG_LINK,
9087 "10M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009088 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009089 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009090
9091 advertising = (ADVERTISED_10baseT_Full |
9092 ADVERTISED_TP);
9093 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009094 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009095 SUPPORTED_10baseT_Half)) {
9096 DP(NETIF_MSG_LINK,
9097 "10M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009098 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009099 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009100
9101 advertising = (ADVERTISED_10baseT_Half |
9102 ADVERTISED_TP);
9103 }
9104 break;
9105
9106 case SPEED_100:
9107 if (cmd->duplex == DUPLEX_FULL) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009108 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009109 SUPPORTED_100baseT_Full)) {
9110 DP(NETIF_MSG_LINK,
9111 "100M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009112 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009113 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009114
9115 advertising = (ADVERTISED_100baseT_Full |
9116 ADVERTISED_TP);
9117 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009118 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009119 SUPPORTED_100baseT_Half)) {
9120 DP(NETIF_MSG_LINK,
9121 "100M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009122 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009123 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009124
9125 advertising = (ADVERTISED_100baseT_Half |
9126 ADVERTISED_TP);
9127 }
9128 break;
9129
9130 case SPEED_1000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009131 if (cmd->duplex != DUPLEX_FULL) {
9132 DP(NETIF_MSG_LINK, "1G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009133 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009134 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009135
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009136 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009137 DP(NETIF_MSG_LINK, "1G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009138 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009139 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009140
9141 advertising = (ADVERTISED_1000baseT_Full |
9142 ADVERTISED_TP);
9143 break;
9144
9145 case SPEED_2500:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009146 if (cmd->duplex != DUPLEX_FULL) {
9147 DP(NETIF_MSG_LINK,
9148 "2.5G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009149 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009150 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009151
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009152 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009153 DP(NETIF_MSG_LINK,
9154 "2.5G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009155 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009156 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009157
Eliezer Tamirf1410642008-02-28 11:51:50 -08009158 advertising = (ADVERTISED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009159 ADVERTISED_TP);
9160 break;
9161
9162 case SPEED_10000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009163 if (cmd->duplex != DUPLEX_FULL) {
9164 DP(NETIF_MSG_LINK, "10G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009165 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009166 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009167
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009168 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009169 DP(NETIF_MSG_LINK, "10G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009170 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009171 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009172
9173 advertising = (ADVERTISED_10000baseT_Full |
9174 ADVERTISED_FIBRE);
9175 break;
9176
9177 default:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009178 DP(NETIF_MSG_LINK, "Unsupported speed\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009179 return -EINVAL;
9180 }
9181
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009182 bp->link_params.req_line_speed = cmd->speed;
9183 bp->link_params.req_duplex = cmd->duplex;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009184 bp->port.advertising = advertising;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009185 }
9186
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009187 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009188 DP_LEVEL " req_duplex %d advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009189 bp->link_params.req_line_speed, bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009190 bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009191
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009192 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009193 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009194 bnx2x_link_set(bp);
9195 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009196
9197 return 0;
9198}
9199
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009200#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9201#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9202
9203static int bnx2x_get_regs_len(struct net_device *dev)
9204{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009205 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009206 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009207 int i;
9208
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009209 if (CHIP_IS_E1(bp)) {
9210 for (i = 0; i < REGS_COUNT; i++)
9211 if (IS_E1_ONLINE(reg_addrs[i].info))
9212 regdump_len += reg_addrs[i].size;
9213
9214 for (i = 0; i < WREGS_COUNT_E1; i++)
9215 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9216 regdump_len += wreg_addrs_e1[i].size *
9217 (1 + wreg_addrs_e1[i].read_regs_count);
9218
9219 } else { /* E1H */
9220 for (i = 0; i < REGS_COUNT; i++)
9221 if (IS_E1H_ONLINE(reg_addrs[i].info))
9222 regdump_len += reg_addrs[i].size;
9223
9224 for (i = 0; i < WREGS_COUNT_E1H; i++)
9225 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9226 regdump_len += wreg_addrs_e1h[i].size *
9227 (1 + wreg_addrs_e1h[i].read_regs_count);
9228 }
9229 regdump_len *= 4;
9230 regdump_len += sizeof(struct dump_hdr);
9231
9232 return regdump_len;
9233}
9234
/* ethtool -d: dump chip registers into the user-supplied buffer.
 *
 * Layout: a struct dump_hdr followed by the raw 32-bit values of every
 * register that is on-line for this chip revision, read in reg_addrs[]
 * order.  bnx2x_get_regs_len() sizes the buffer with the same walk, so
 * the two must stay in sync.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* the chip cannot be read while the interface is down */
	if (!netif_running(bp->dev))
		return;

	/* header: size in dwords minus one, dump signature, storm
	 * wait-pointers sampled from the chip, and the on-line mask */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9274
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009275#define PHY_FW_VER_LEN 10
9276
9277static void bnx2x_get_drvinfo(struct net_device *dev,
9278 struct ethtool_drvinfo *info)
9279{
9280 struct bnx2x *bp = netdev_priv(dev);
9281 u8 phy_fw_ver[PHY_FW_VER_LEN];
9282
9283 strcpy(info->driver, DRV_MODULE_NAME);
9284 strcpy(info->version, DRV_MODULE_VERSION);
9285
9286 phy_fw_ver[0] = '\0';
9287 if (bp->port.pmf) {
9288 bnx2x_acquire_phy_lock(bp);
9289 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9290 (bp->state != BNX2X_STATE_CLOSED),
9291 phy_fw_ver, PHY_FW_VER_LEN);
9292 bnx2x_release_phy_lock(bp);
9293 }
9294
9295 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9296 (bp->common.bc_ver & 0xff0000) >> 16,
9297 (bp->common.bc_ver & 0xff00) >> 8,
9298 (bp->common.bc_ver & 0xff),
9299 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9300 strcpy(info->bus_info, pci_name(bp->pdev));
9301 info->n_stats = BNX2X_NUM_STATS;
9302 info->testinfo_len = BNX2X_NUM_TESTS;
9303 info->eedump_len = bp->common.flash_size;
9304 info->regdump_len = bnx2x_get_regs_len(dev);
9305}
9306
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009307static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9308{
9309 struct bnx2x *bp = netdev_priv(dev);
9310
9311 if (bp->flags & NO_WOL_FLAG) {
9312 wol->supported = 0;
9313 wol->wolopts = 0;
9314 } else {
9315 wol->supported = WAKE_MAGIC;
9316 if (bp->wol)
9317 wol->wolopts = WAKE_MAGIC;
9318 else
9319 wol->wolopts = 0;
9320 }
9321 memset(&wol->sopass, 0, sizeof(wol->sopass));
9322}
9323
9324static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9325{
9326 struct bnx2x *bp = netdev_priv(dev);
9327
9328 if (wol->wolopts & ~WAKE_MAGIC)
9329 return -EINVAL;
9330
9331 if (wol->wolopts & WAKE_MAGIC) {
9332 if (bp->flags & NO_WOL_FLAG)
9333 return -EINVAL;
9334
9335 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009336 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009337 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009338
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009339 return 0;
9340}
9341
9342static u32 bnx2x_get_msglevel(struct net_device *dev)
9343{
9344 struct bnx2x *bp = netdev_priv(dev);
9345
Joe Perches7995c642010-02-17 15:01:52 +00009346 return bp->msg_enable;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009347}
9348
9349static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9350{
9351 struct bnx2x *bp = netdev_priv(dev);
9352
9353 if (capable(CAP_NET_ADMIN))
Joe Perches7995c642010-02-17 15:01:52 +00009354 bp->msg_enable = level;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009355}
9356
9357static int bnx2x_nway_reset(struct net_device *dev)
9358{
9359 struct bnx2x *bp = netdev_priv(dev);
9360
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009361 if (!bp->port.pmf)
9362 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009363
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009364 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009365 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009366 bnx2x_link_set(bp);
9367 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009368
9369 return 0;
9370}
9371
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009372static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009373{
9374 struct bnx2x *bp = netdev_priv(dev);
9375
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009376 if (bp->flags & MF_FUNC_DIS)
9377 return 0;
9378
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009379 return bp->link_vars.link_up;
9380}
9381
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009382static int bnx2x_get_eeprom_len(struct net_device *dev)
9383{
9384 struct bnx2x *bp = netdev_priv(dev);
9385
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009386 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009387}
9388
/* Grab the per-port NVRAM software arbitration lock from the MCP.
 *
 * Requests ARB_REQ_SET1<<port, then polls for the matching ARB1<<port
 * grant bit.  Returns 0 on success or -EBUSY if the grant never appears
 * within the (emulation-scaled) timeout.  Must be balanced with
 * bnx2x_release_nvram_lock().
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the arbitration grant */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9419
/* Release the per-port NVRAM software arbitration lock.
 *
 * Mirror of bnx2x_acquire_nvram_lock(): writes ARB_REQ_CLR1<<port and
 * polls until the ARB1<<port grant bit drops.  Returns 0 on success or
 * -EBUSY if the MCP never clears the grant within the timeout.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the grant bit is gone */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9450
9451static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9452{
9453 u32 val;
9454
9455 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9456
9457 /* enable both bits, even on read */
9458 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9459 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9460 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9461}
9462
9463static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9464{
9465 u32 val;
9466
9467 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9468
9469 /* disable both bits, even after read */
9470 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9471 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9472 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9473}
9474
/* Issue one NVRAM read command and wait for its completion.
 *
 * @cmd_flags carries the FIRST/LAST burst markers; the DOIT bit is
 * added here.  On success, *ret_val receives the dword converted to
 * big-endian (ethtool presents the eeprom as a byte array).
 * Returns 0, or -EBUSY if DONE is not seen within the timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9519
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 *
 * Both offset and size must be dword-aligned and non-zero, and the
 * range must fit inside the flash.  The dwords are read as one burst:
 * the first carries MCPR_NVM_COMMAND_FIRST, the final one
 * MCPR_NVM_COMMAND_LAST.  Takes and releases the NVRAM lock around the
 * whole transfer.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		/* val is already big-endian, i.e. byte-array order */
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* final dword of the burst carries the LAST marker */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9574
9575static int bnx2x_get_eeprom(struct net_device *dev,
9576 struct ethtool_eeprom *eeprom, u8 *eebuf)
9577{
9578 struct bnx2x *bp = netdev_priv(dev);
9579 int rc;
9580
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009581 if (!netif_running(dev))
9582 return -EAGAIN;
9583
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009584 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009585 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9586 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9587 eeprom->len, eeprom->len);
9588
9589 /* parameters already validated in ethtool_get_eeprom */
9590
9591 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9592
9593 return rc;
9594}
9595
/* Issue one NVRAM write command for @val at @offset and wait for DONE.
 *
 * @cmd_flags carries the FIRST/LAST burst markers; DOIT and WR are
 * added here.  Returns 0, or -EBUSY if the command does not complete
 * within the (emulation-scaled) timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; val is reused as the status scratch */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9635
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte to NVRAM (ethtool's byte-granular path):
 * read-modify-write of the dword that contains the byte.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the full dword holding the target byte (one-shot burst) */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* patch the byte selected by offset & 3
		 * NOTE(review): shift/mask arithmetic is performed on a
		 * __be32 value; it relies on the byte-array layout of the
		 * big-endian dword - verify on a big-endian host before
		 * restructuring */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9683
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * A 1-byte write (ethtool's unit write) is delegated to
 * bnx2x_nvram_write1(); otherwise offset and size must be
 * dword-aligned and within the flash.  FIRST/LAST command markers are
 * raised on page (NVRAM_PAGE_SIZE) boundaries and at the final dword,
 * as the flash requires.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* mark the last dword of the buffer or of a flash page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9744
/* ethtool -E: write NVRAM, or drive the external-PHY firmware-upgrade
 * protocol when eeprom->magic carries one of the 'PHY*' cookies:
 *   'PHYP' (0x50485950) - prepare the PHY for a FW upgrade
 *   'PHYR' (0x50485952) - re-initialize the link after the upgrade
 *   'PHYC' (0x53985943) - upgrade completed, reset the SFX7101 DSP
 * Any other magic is treated as a plain NVRAM write.
 *
 * NOTE(review): rc accumulates helper return codes with |= - mixing
 * negative errnos this way can yield a scrambled value; only the
 * zero/non-zero distinction is meaningful here.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9819
9820static int bnx2x_get_coalesce(struct net_device *dev,
9821 struct ethtool_coalesce *coal)
9822{
9823 struct bnx2x *bp = netdev_priv(dev);
9824
9825 memset(coal, 0, sizeof(struct ethtool_coalesce));
9826
9827 coal->rx_coalesce_usecs = bp->rx_ticks;
9828 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009829
9830 return 0;
9831}
9832
Eilon Greensteinca003922009-08-12 22:53:28 -07009833#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009834static int bnx2x_set_coalesce(struct net_device *dev,
9835 struct ethtool_coalesce *coal)
9836{
9837 struct bnx2x *bp = netdev_priv(dev);
9838
9839 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009840 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9841 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009842
9843 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009844 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9845 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009846
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009847 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009848 bnx2x_update_coalesce(bp);
9849
9850 return 0;
9851}
9852
9853static void bnx2x_get_ringparam(struct net_device *dev,
9854 struct ethtool_ringparam *ering)
9855{
9856 struct bnx2x *bp = netdev_priv(dev);
9857
9858 ering->rx_max_pending = MAX_RX_AVAIL;
9859 ering->rx_mini_max_pending = 0;
9860 ering->rx_jumbo_max_pending = 0;
9861
9862 ering->rx_pending = bp->rx_ring_size;
9863 ering->rx_mini_pending = 0;
9864 ering->rx_jumbo_pending = 0;
9865
9866 ering->tx_max_pending = MAX_TX_AVAIL;
9867 ering->tx_pending = bp->tx_ring_size;
9868}
9869
9870static int bnx2x_set_ringparam(struct net_device *dev,
9871 struct ethtool_ringparam *ering)
9872{
9873 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009874 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009875
9876 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9877 (ering->tx_pending > MAX_TX_AVAIL) ||
9878 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9879 return -EINVAL;
9880
9881 bp->rx_ring_size = ering->rx_pending;
9882 bp->tx_ring_size = ering->tx_pending;
9883
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009884 if (netif_running(dev)) {
9885 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9886 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009887 }
9888
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009889 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009890}
9891
9892static void bnx2x_get_pauseparam(struct net_device *dev,
9893 struct ethtool_pauseparam *epause)
9894{
9895 struct bnx2x *bp = netdev_priv(dev);
9896
Eilon Greenstein356e2382009-02-12 08:38:32 +00009897 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9898 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009899 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9900
David S. Millerc0700f92008-12-16 23:53:20 -08009901 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9902 BNX2X_FLOW_CTRL_RX);
9903 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9904 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009905
9906 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9907 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9908 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9909}
9910
/* ethtool -A: program the requested flow control.
 *
 * req_flow_ctrl starts from AUTO, gains RX/TX bits from the request,
 * falls back to NONE if neither was requested, and is restored to AUTO
 * only when autoneg is asked for and the line speed is auto-negotiated.
 * A running NIC gets the new settings applied immediately.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* per-function flow control is not configurable in MF mode */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* neither RX nor TX requested: explicitly no flow control */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9954
/* ethtool set-flags: only ETH_FLAG_LRO is handled; toggling LRO also
 * toggles the driver's TPA (HW aggregation) flag and reloads a running
 * NIC to apply.
 *
 * NOTE(review): a request to enable LRO while Rx checksum offload is
 * off is not rejected - the else-branch leaves (or turns) LRO off and
 * returns 0, so the caller gets silent non-enablement; confirm this is
 * the intended contract with bnx2x_set_rx_csum() before changing.
 */
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	/* a feature change only takes effect after a reload */
	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
9984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009985static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986{
9987 struct bnx2x *bp = netdev_priv(dev);
9988
9989 return bp->rx_csum;
9990}
9991
/* ethtool set_rx_csum handler.
 *
 * Stores the new Rx checksum offload setting and, when it is being turned
 * off, also forces LRO/TPA off via bnx2x_set_flags() — otherwise every
 * TPA-aggregated packet would be discarded due to a "wrong" TCP checksum.
 *
 * Returns 0, or the error from bnx2x_set_flags() (which may reload the NIC).
 */
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
10009
10010static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011{
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010012 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010014 dev->features |= NETIF_F_TSO6;
10015 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010017 dev->features &= ~NETIF_F_TSO6;
10018 }
10019
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010020 return 0;
10021}
10022
/* Names of the self-test entries reported through ethtool ETH_SS_TEST.
 * Order must match the buf[] indices used in bnx2x_self_test():
 * 0..2 are offline tests, 3..6 run online.
 */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10034
/* Offline register test.
 *
 * Walks a table of writable registers, writes a pattern (first all zeros,
 * then all ones), reads it back through the per-register writable-bit mask,
 * and restores the original value.  For each entry, offset0 is the port-0
 * register address and offset1 is the per-port stride, so the tested
 * address is offset0 + port*offset1.
 *
 * Returns 0 if every register echoed the pattern, -ENODEV on the first
 * mismatch or if the interface is not running.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* base (port 0) register address */
		u32 offset1;	/* per-port address stride */
		u32 mask;	/* bits that are writable/compared */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
10127
/* Offline memory test.
 *
 * Reads every word of the listed internal memories (any latent parity
 * error in them will then be latched by the hardware), and afterwards
 * checks the corresponding parity-status registers.  Bits listed in the
 * per-chip mask (e1_mask for 57710/E1, e1h_mask for E1H) are expected
 * noise and ignored; any other set bit fails the test.
 *
 * Returns 0 on success, -ENODEV on a parity indication or if the
 * interface is not running.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;	/* first word of the memory block */
		int size;	/* number of 32-bit words to read */
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* table terminator */
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;	/* parity bits ignored on E1 */
		u32 e1h_mask;	/* parity bits ignored on E1H */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
10186
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010187static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188{
10189 int cnt = 1000;
10190
10191 if (link_up)
10192 while (bnx2x_link_test(bp) && cnt--)
10193 msleep(10);
10194}
10195
/* Run a single-packet loopback test in the given mode (PHY or MAC).
 *
 * Builds one self-addressed Ethernet frame with a known byte pattern,
 * posts it on queue 0's Tx ring by hand (start BD + parse BD + doorbell),
 * then polls the Tx/Rx status-block indices and verifies the frame came
 * back on the Rx ring with the correct length and payload.
 *
 * Must be called with the netif stopped and the PHY lock held (see
 * bnx2x_test_loopback()); the BD rings are manipulated without the
 * normal Tx path locking.
 *
 * Returns 0 on success, -EINVAL for an unusable mode, -ENOMEM if the
 * skb allocation fails, -ENODEV when the packet is not echoed correctly.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback only works when the link was brought up in
		   XGXS-10G loopback mode (LOAD_DIAG path) */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		/* re-init the PHY with the BMAC looped back on itself */
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own MAC, zero src MAC,
	   0x77 filler for the rest of the header, then an incrementing
	   byte pattern that is verified on receive */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot the status-block consumer indices so we can tell when
	   exactly one packet has been transmitted and received */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD with the DMA mapping of the frame */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are written before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the frame back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* inspect the completion entry: must be a fast-path CQE without
	   error flags */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload byte pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the Rx BD/CQE we just inspected */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10330
/* Run both loopback sub-tests (PHY then MAC) with the datapath quiesced.
 *
 * Stops the netif and takes the PHY lock around bnx2x_run_loopback(),
 * which pokes the queue-0 rings directly.  Returns a bitmask of
 * BNX2X_PHY_LOOPBACK_FAILED / BNX2X_MAC_LOOPBACK_FAILED (0 == all passed),
 * or BNX2X_LOOPBACK_FAILED if the interface is not running at all.
 */
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
10358
/* Residue left by a little-endian CRC32 computed over data that already
 * includes its own CRC - a constant for any intact block. */
#define CRC32_RESIDUAL			0xdebb20e3

/* Online NVRAM test.
 *
 * Verifies the NVRAM magic word (0x669955aa) and then CRC-checks each of
 * the fixed-layout regions listed in nvram_tbl (bootstrap, directory,
 * manufacturing info, etc.).  Each region ends with its CRC, so running
 * ether_crc_le() over the whole region must yield CRC32_RESIDUAL.
 *
 * Returns 0 if everything checks out, a read error code from
 * bnx2x_nvram_read(), or -ENODEV on bad magic/CRC.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;	/* byte offset in NVRAM */
		int size;	/* region length in bytes, incl. its CRC */
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }  /* table terminator */
	};
	__be32 buf[0x350 / 4];	/* large enough for the biggest region */
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
10417
/* Online interrupt test.
 *
 * Posts a harmless zero-length SET_MAC ramrod on the slowpath queue and
 * waits (up to ~100ms) for the completion interrupt to clear
 * bp->set_mac_pending.  The smp_wmb()/smp_rmb() pair orders the pending
 * flag against the ramrod post and the completion path's update.
 *
 * Returns 0 if the completion arrived, -ENODEV on timeout or if the
 * interface is not running; otherwise the bnx2x_sp_post() error.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-entry command: nothing is actually configured */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll for the completion to clear the pending flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10453
/* ethtool self-test handler.
 *
 * Fills buf[] (one u64 per entry of bnx2x_tests_str_arr, 0 == pass) and
 * sets ETH_TEST_FL_FAILED on any failure.  Offline tests (register,
 * memory, loopback) reload the NIC in diagnostic mode and are skipped
 * entirely in E1H multi-function mode; online tests (nvram, interrupt,
 * link) always run.  Does nothing when the interface is down.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember pre-test link state so it can be re-awaited
		   after each reload */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback returns a failure bitmask, stored as-is */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* link test only makes sense on the port-management function */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10523
/* Per-queue statistics descriptors for ethtool.
 * offset/size locate the counter inside struct bnx2x_eth_q_stats
 * (size 8 = two 32-bit halves starting at the _hi word, size 4 = single
 * 32-bit counter); string is a printf template taking the queue index.
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
10549
/* Device-wide statistics descriptors for ethtool.
 * offset/size locate the counter in the driver's stats block (size 8 =
 * 64-bit counter addressed via its _hi word, size 4 = 32-bit counter).
 * flags mark whether a counter is per-port, per-function, or both; in
 * E1H multi-function mode only function counters are reported (see
 * IS_E1HMF_MODE_STAT/IS_PORT_STAT below).
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10643
/* A stat is "port only" when its flags reduce to STATS_FLAGS_PORT alone;
 * anything with the FUNC bit set counts as a function stat. */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode only function stats are exposed, unless the
 * user enabled BNX2X_MSG_STATS debugging via msg_enable. */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010649
Ben Hutchings15f0a392009-10-01 11:58:24 +000010650static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10651{
10652 struct bnx2x *bp = netdev_priv(dev);
10653 int i, num_stats;
10654
10655 switch(stringset) {
10656 case ETH_SS_STATS:
10657 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010658 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010659 if (!IS_E1HMF_MODE_STAT(bp))
10660 num_stats += BNX2X_NUM_STATS;
10661 } else {
10662 if (IS_E1HMF_MODE_STAT(bp)) {
10663 num_stats = 0;
10664 for (i = 0; i < BNX2X_NUM_STATS; i++)
10665 if (IS_FUNC_STAT(i))
10666 num_stats++;
10667 } else
10668 num_stats = BNX2X_NUM_STATS;
10669 }
10670 return num_stats;
10671
10672 case ETH_SS_TEST:
10673 return BNX2X_NUM_TESTS;
10674
10675 default:
10676 return -EINVAL;
10677 }
10678}
10679
/* ethtool get_strings handler.
 *
 * Fills buf with ETH_GSTRING_LEN-sized names matching, entry for entry,
 * what bnx2x_get_sset_count() reported: per-queue stat names (with the
 * queue index substituted into the "[%d]: ..." template) followed by the
 * global stat names, or the self-test names for ETH_SS_TEST.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			/* k tracks how many strings were emitted so far */
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			/* in MF mode the global stats are not reported */
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			/* single queue: global stats only, skipping
			   port-scoped entries in MF mode */
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
10716
/* ethtool_ops::get_ethtool_stats - copy driver/HW counters into @buf.
 *
 * Layout must match bnx2x_get_strings()/bnx2x_get_sset_count(): in
 * multi-queue mode the per-queue counters come first (one run of
 * BNX2X_NUM_Q_STATS per queue), followed by the global counters unless
 * E1H MF mode hides them; in single-queue mode only the (possibly
 * filtered) global counters are emitted.  Array entries with size 0 are
 * placeholders and report 0; size 4 entries are single u32 counters;
 * anything else is a hi/lo u32 pair combined with HILO_U64.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* k tracks how many u64 slots earlier queues consumed */
		k = 0;
		for_each_queue(bp, i) {
			/* counters are addressed as u32 words off the
			 * per-queue stats struct */
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		/* E1H MF mode: global stats are not exposed at all */
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		/* j is the output slot; it only advances for entries we
		 * actually emit (port stats may be filtered in MF mode) */
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
10788
10789static int bnx2x_phys_id(struct net_device *dev, u32 data)
10790{
10791 struct bnx2x *bp = netdev_priv(dev);
10792 int i;
10793
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010794 if (!netif_running(dev))
10795 return 0;
10796
10797 if (!bp->port.pmf)
10798 return 0;
10799
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010800 if (data == 0)
10801 data = 2;
10802
10803 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010804 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010805 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10806 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010807 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010808 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010809
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010810 msleep_interruptible(500);
10811 if (signal_pending(current))
10812 break;
10813 }
10814
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010815 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010816 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10817 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010818
10819 return 0;
10820}
10821
/* ethtool entry points for the bnx2x net_device.  Standard checksum/
 * SG/flags getters use the generic ethtool_op_* helpers; everything
 * else is implemented above in this file (or earlier in it). */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
10859
10860/* end of ethtool_ops */
10861
10862/****************************************************************************
10863* General service functions
10864****************************************************************************/
10865
/* Move the device between PCI power states via the PM capability's
 * PMCSR register.  Only D0 and D3hot are supported; anything else
 * returns -EINVAL.  Entering D3hot optionally arms PME if WoL is
 * configured; after the D3hot write, no further memory-mapped access
 * to the device is allowed until it is brought back to D0.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the power-state field (-> D0) and write-1-to-clear
		 * any pending PME status */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 == D3hot encoding in the PMCSR field */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10903
Eilon Greenstein237907c2009-01-14 06:42:44 +000010904static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10905{
10906 u16 rx_cons_sb;
10907
10908 /* Tell compiler that status block fields can change */
10909 barrier();
10910 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10911 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10912 rx_cons_sb++;
10913 return (fp->rx_comp_cons != rx_cons_sb);
10914}
10915
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010916/*
10917 * net_device service functions
10918 */
10919
/* NAPI poll handler for one fastpath: drain TX completions, process up
 * to @budget RX completions, and only re-enable the queue's interrupts
 * (via the two IGU acks) once both rings are verifiably empty.
 * Returns the number of RX packets processed; returning a value equal
 * to @budget keeps the instance scheduled.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions don't count against the RX budget */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			/* re-check after the barrier; if still idle we can
			 * safely complete and unmask the interrupt */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
10978
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010979
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * @tx_buf:  sw ring entry for the packet; gets BNX2X_TSO_SPLIT_BD so
 *           the completion path knows the data BD has no own mapping
 * @tx_bd:   in/out - on entry the start BD, on exit the new data BD
 * @hlen:    header length in bytes; the start BD is trimmed to it
 * @bd_prod: current BD producer index
 * @nbd:     total BD count written into the start BD
 * Returns the advanced BD producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data BD points into the same DMA mapping, hlen bytes in */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11029
11030static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11031{
11032 if (fix > 0)
11033 csum = (u16) ~csum_fold(csum_sub(csum,
11034 csum_partial(t_header - fix, fix, 0)));
11035
11036 else if (fix < 0)
11037 csum = (u16) ~csum_fold(csum_add(csum,
11038 csum_partial(t_header, -fix, 0)));
11039
11040 return swab16(csum);
11041}
11042
11043static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11044{
11045 u32 rc;
11046
11047 if (skb->ip_summed != CHECKSUM_PARTIAL)
11048 rc = XMIT_PLAIN;
11049
11050 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011051 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011052 rc = XMIT_CSUM_V6;
11053 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11054 rc |= XMIT_CSUM_TCP;
11055
11056 } else {
11057 rc = XMIT_CSUM_V4;
11058 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11059 rc |= XMIT_CSUM_TCP;
11060 }
11061 }
11062
11063 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011064 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011065
11066 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011067 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011068
11069 return rc;
11070}
11071
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
/* Returns 1 when the skb must be linearized before transmit.  For LSO
 * packets, a sliding window of (MAX_FETCH_BD - 3) consecutive BDs is
 * slid across the fragments; if any window holds less than one MSS,
 * the FW could be handed a segment spread over too many BDs, so the
 * packet is copied instead.  Non-LSO packets with too many frags are
 * always linearized.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* slide the window one fragment forward */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011152
11153/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011154 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011155 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011156 */
Stephen Hemminger613573252009-08-31 19:50:58 +000011157static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011158{
11159 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011160 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011161 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011162 struct sw_tx_bd *tx_buf;
Eilon Greensteinca003922009-08-12 22:53:28 -070011163 struct eth_tx_start_bd *tx_start_bd;
11164 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011165 struct eth_tx_parse_bd *pbd = NULL;
11166 u16 pkt_prod, bd_prod;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011167 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011168 dma_addr_t mapping;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011169 u32 xmit_type = bnx2x_xmit_type(bp, skb);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011170 int i;
11171 u8 hlen = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070011172 __le16 pkt_size = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011173
11174#ifdef BNX2X_STOP_ON_ERROR
11175 if (unlikely(bp->panic))
11176 return NETDEV_TX_BUSY;
11177#endif
11178
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011179 fp_index = skb_get_queue_mapping(skb);
11180 txq = netdev_get_tx_queue(dev, fp_index);
11181
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011182 fp = &bp->fp[fp_index];
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011183
Yitchak Gertner231fd582008-08-25 15:27:06 -070011184 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011185 fp->eth_q_stats.driver_xoff++;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011186 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011187 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11188 return NETDEV_TX_BUSY;
11189 }
11190
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011191 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11192 " gso type %x xmit_type %x\n",
11193 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11194 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11195
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011196#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011197 /* First, check if we need to linearize the skb (due to FW
11198 restrictions). No need to check fragmentation if page size > 8K
11199 (there will be no violation to FW restrictions) */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011200 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11201 /* Statistics of linearization */
11202 bp->lin_cnt++;
11203 if (skb_linearize(skb) != 0) {
11204 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11205 "silently dropping this SKB\n");
11206 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011207 return NETDEV_TX_OK;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011208 }
11209 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011210#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011211
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011212 /*
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011213 Please read carefully. First we use one BD which we mark as start,
Eilon Greensteinca003922009-08-12 22:53:28 -070011214 then we have a parsing info BD (used for TSO or xsum),
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011215 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011216 (don't forget to mark the last one as last,
11217 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011218 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011219 */
11220
11221 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011222 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011223
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011224 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011225 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
Eilon Greensteinca003922009-08-12 22:53:28 -070011226 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011227
Eilon Greensteinca003922009-08-12 22:53:28 -070011228 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11229 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11230 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070011231 /* header nbd */
Eilon Greensteinca003922009-08-12 22:53:28 -070011232 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011233
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011234 /* remember the first BD of the packet */
11235 tx_buf->first_bd = fp->tx_bd_prod;
11236 tx_buf->skb = skb;
Eilon Greensteinca003922009-08-12 22:53:28 -070011237 tx_buf->flags = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011238
11239 DP(NETIF_MSG_TX_QUEUED,
11240 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011241 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011242
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011243#ifdef BCM_VLAN
11244 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11245 (bp->flags & HW_VLAN_TX_FLAG)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011246 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11247 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011248 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011249#endif
Eilon Greensteinca003922009-08-12 22:53:28 -070011250 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011251
Eilon Greensteinca003922009-08-12 22:53:28 -070011252 /* turn on parsing and get a BD */
11253 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11254 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011255
Eilon Greensteinca003922009-08-12 22:53:28 -070011256 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011257
11258 if (xmit_type & XMIT_CSUM) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011259 hlen = (skb_network_header(skb) - skb->data) / 2;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011260
11261 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011262 pbd->global_data =
11263 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11264 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011265
11266 pbd->ip_hlen = (skb_transport_header(skb) -
11267 skb_network_header(skb)) / 2;
11268
11269 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11270
11271 pbd->total_hlen = cpu_to_le16(hlen);
Eilon Greensteinca003922009-08-12 22:53:28 -070011272 hlen = hlen*2;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011273
Eilon Greensteinca003922009-08-12 22:53:28 -070011274 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011275
11276 if (xmit_type & XMIT_CSUM_V4)
Eilon Greensteinca003922009-08-12 22:53:28 -070011277 tx_start_bd->bd_flags.as_bitfield |=
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011278 ETH_TX_BD_FLAGS_IP_CSUM;
11279 else
Eilon Greensteinca003922009-08-12 22:53:28 -070011280 tx_start_bd->bd_flags.as_bitfield |=
11281 ETH_TX_BD_FLAGS_IPV6;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011282
11283 if (xmit_type & XMIT_CSUM_TCP) {
11284 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11285
11286 } else {
11287 s8 fix = SKB_CS_OFF(skb); /* signed! */
11288
Eilon Greensteinca003922009-08-12 22:53:28 -070011289 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011290
11291 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011292 "hlen %d fix %d csum before fix %x\n",
11293 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011294
11295 /* HW bug: fixup the CSUM */
11296 pbd->tcp_pseudo_csum =
11297 bnx2x_csum_fix(skb_transport_header(skb),
11298 SKB_CS(skb), fix);
11299
11300 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11301 pbd->tcp_pseudo_csum);
11302 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011303 }
11304
11305 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011306 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011307
Eilon Greensteinca003922009-08-12 22:53:28 -070011308 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11309 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11310 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11311 tx_start_bd->nbd = cpu_to_le16(nbd);
11312 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11313 pkt_size = tx_start_bd->nbytes;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011314
11315 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011316 " nbytes %d flags %x vlan %x\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011317 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11318 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11319 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011320
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011321 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011322
11323 DP(NETIF_MSG_TX_QUEUED,
11324 "TSO packet len %d hlen %d total len %d tso size %d\n",
11325 skb->len, hlen, skb_headlen(skb),
11326 skb_shinfo(skb)->gso_size);
11327
Eilon Greensteinca003922009-08-12 22:53:28 -070011328 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011329
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011330 if (unlikely(skb_headlen(skb) > hlen))
Eilon Greensteinca003922009-08-12 22:53:28 -070011331 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11332 hlen, bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011333
11334 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11335 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011336 pbd->tcp_flags = pbd_tcp_flags(skb);
11337
11338 if (xmit_type & XMIT_GSO_V4) {
11339 pbd->ip_id = swab16(ip_hdr(skb)->id);
11340 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011341 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11342 ip_hdr(skb)->daddr,
11343 0, IPPROTO_TCP, 0));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011344
11345 } else
11346 pbd->tcp_pseudo_csum =
11347 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11348 &ipv6_hdr(skb)->daddr,
11349 0, IPPROTO_TCP, 0));
11350
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011351 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11352 }
Eilon Greensteinca003922009-08-12 22:53:28 -070011353 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011354
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011355 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11356 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011357
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Eilon Greensteinca003922009-08-12 22:53:28 -070011359 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11360 if (total_pkt_bd == NULL)
11361 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011362
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011363 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11364 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011365
Eilon Greensteinca003922009-08-12 22:53:28 -070011366 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11367 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11368 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11369 le16_add_cpu(&pkt_size, frag->size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011370
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011371 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011372 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11373 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11374 le16_to_cpu(tx_data_bd->nbytes));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011375 }
11376
Eilon Greensteinca003922009-08-12 22:53:28 -070011377 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011378
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011379 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11380
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011381 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011382 * if the packet contains or ends with it
11383 */
11384 if (TX_BD_POFF(bd_prod) < nbd)
11385 nbd++;
11386
Eilon Greensteinca003922009-08-12 22:53:28 -070011387 if (total_pkt_bd != NULL)
11388 total_pkt_bd->total_pkt_bytes = pkt_size;
11389
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011390 if (pbd)
11391 DP(NETIF_MSG_TX_QUEUED,
11392 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11393 " tcp_flags %x xsum %x seq %u hlen %u\n",
11394 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11395 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011396 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011397
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011398 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011399
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011400 /*
11401 * Make sure that the BD data is updated before updating the producer
11402 * since FW might read the BD right after the producer is updated.
11403 * This is only applicable for weak-ordered memory model archs such
11404 * as IA-64. The following barrier is also mandatory since FW will
11405 * assumes packets must have BDs.
11406 */
11407 wmb();
11408
Eilon Greensteinca003922009-08-12 22:53:28 -070011409 fp->tx_db.data.prod += nbd;
11410 barrier();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011411 DOORBELL(bp, fp->index, fp->tx_db.raw);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011412
11413 mmiowb();
11414
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011415 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011416
11417 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011418 netif_tx_stop_queue(txq);
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011419 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11420 if we put Tx into XOFF state. */
11421 smp_mb();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011422 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011423 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011424 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011425 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011426 fp->tx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011427
11428 return NETDEV_TX_OK;
11429}
11430
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011431/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011432static int bnx2x_open(struct net_device *dev)
11433{
11434 struct bnx2x *bp = netdev_priv(dev);
11435
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011436 netif_carrier_off(dev);
11437
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011438 bnx2x_set_power_state(bp, PCI_D0);
11439
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011440 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011441}
11442
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011443/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011444static int bnx2x_close(struct net_device *dev)
11445{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011446 struct bnx2x *bp = netdev_priv(dev);
11447
11448 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011449 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11450 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11451 if (!CHIP_REV_IS_SLOW(bp))
11452 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011453
11454 return 0;
11455}
11456
/* called with netif_tx_lock from dev_mcast.c */
/*
 * Program the Rx filtering mode according to dev->flags and the device
 * multicast list:
 *   - IFF_PROMISC			-> promiscuous mode
 *   - IFF_ALLMULTI (or, on E1, more
 *     MACs than the CAM can hold)	-> accept all multicast
 *   - otherwise			-> program the individual multicast
 *					   filters (CAM on E1, hash on E1H)
 * Finally the chosen mode is pushed to the storms via
 * bnx2x_set_storm_rx_mode().
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* filters can only be programmed once the chip is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/* E1: write exact-match multicast MACs into the CAM
			 * through a SET_MAC ramrod */
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				/* MACs are stored as three swapped 16-bit
				 * words, as the CAM expects them */
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			/* invalidate CAM entries left over from a previous,
			 * longer multicast list */
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
						    config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* per-port base offset into the CAM */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* mark the ramrod as pending before posting it; the
			 * barrier orders the flag vs. the ramrod itself */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts: build a 256-bit
			 * hash filter from the top byte of each MAC's CRC */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11581
11582/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011583static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11584{
11585 struct sockaddr *addr = p;
11586 struct bnx2x *bp = netdev_priv(dev);
11587
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011588 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011589 return -EINVAL;
11590
11591 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011592 if (netif_running(dev)) {
11593 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011594 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011595 else
Michael Chane665bfd2009-10-10 13:46:54 +000011596 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011597 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011598
11599 return 0;
11600}
11601
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011602/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011603static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11604 int devad, u16 addr)
11605{
11606 struct bnx2x *bp = netdev_priv(netdev);
11607 u16 value;
11608 int rc;
11609 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11610
11611 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11612 prtad, devad, addr);
11613
11614 if (prtad != bp->mdio.prtad) {
11615 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11616 prtad, bp->mdio.prtad);
11617 return -EINVAL;
11618 }
11619
11620 /* The HW expects different devad if CL22 is used */
11621 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11622
11623 bnx2x_acquire_phy_lock(bp);
11624 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11625 devad, addr, &value);
11626 bnx2x_release_phy_lock(bp);
11627 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11628
11629 if (!rc)
11630 rc = value;
11631 return rc;
11632}
11633
11634/* called with rtnl_lock */
11635static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11636 u16 addr, u16 value)
11637{
11638 struct bnx2x *bp = netdev_priv(netdev);
11639 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11640 int rc;
11641
11642 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11643 " value 0x%x\n", prtad, devad, addr, value);
11644
11645 if (prtad != bp->mdio.prtad) {
11646 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11647 prtad, bp->mdio.prtad);
11648 return -EINVAL;
11649 }
11650
11651 /* The HW expects different devad if CL22 is used */
11652 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11653
11654 bnx2x_acquire_phy_lock(bp);
11655 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11656 devad, addr, value);
11657 bnx2x_release_phy_lock(bp);
11658 return rc;
11659}
11660
/* called with rtnl_lock */
/*
 * SIOCxMIIxxx ioctl entry point: delegates MII register access to the
 * generic mdio library, which calls back into bnx2x_mdio_read/write
 * (registered in bp->mdio by bnx2x_init_dev()).
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	/* PHY access is only meaningful while the interface is up */
	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
11675
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011676/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011677static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11678{
11679 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011680 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011681
11682 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11683 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11684 return -EINVAL;
11685
11686 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011687 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011688 * only updated as part of load
11689 */
11690 dev->mtu = new_mtu;
11691
11692 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011693 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11694 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011695 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011696
11697 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011698}
11699
/*
 * Tx watchdog callback: invoked when a Tx queue has been stalled for
 * longer than dev->watchdog_timeo.  Defers recovery to the reset_task
 * work item (process context).
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds: freeze the driver state for inspection instead */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11711
11712#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011713/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011714static void bnx2x_vlan_rx_register(struct net_device *dev,
11715 struct vlan_group *vlgrp)
11716{
11717 struct bnx2x *bp = netdev_priv(dev);
11718
11719 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011720
11721 /* Set flags according to the required capabilities */
11722 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11723
11724 if (dev->features & NETIF_F_HW_VLAN_TX)
11725 bp->flags |= HW_VLAN_TX_FLAG;
11726
11727 if (dev->features & NETIF_F_HW_VLAN_RX)
11728 bp->flags |= HW_VLAN_RX_FLAG;
11729
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011730 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080011731 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011732}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011733
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011734#endif
11735
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +000011736#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point (netconsole etc.): mask the device IRQ and run
 * the interrupt handler directly, then unmask.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
11745#endif
11746
/* net_device callbacks; installed on the netdev in bnx2x_init_dev() */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11764
/*
 * One-time PCI/netdev setup called from bnx2x_init_one(): enables the
 * PCI device, claims and maps BAR0 (registers) and BAR2 (doorbells),
 * configures DMA masks, and populates the net_device ops/features and
 * the mdio glue.  Returns 0 or a negative errno; on failure all
 * resources acquired so far are released via the goto-cleanup chain.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 must be a memory BAR (register space) */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 must be a memory BAR (doorbell space) */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* claim the regions only for the first function using the device */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* map the register window (BAR0) */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map the doorbell window (BAR2), capped at BNX2X_DB_SIZE */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	/* netdev callbacks and offload features */
	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	/* release regions only if we were the ones who claimed them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11921
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011922static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11923 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011924{
11925 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11926
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011927 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11928
11929 /* return value of 1=2.5GHz 2=5GHz */
11930 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011931}
11932
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011933static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11934{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011935 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011936 struct bnx2x_fw_file_hdr *fw_hdr;
11937 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011938 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011939 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011940 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011941 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011942
11943 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11944 return -EINVAL;
11945
11946 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11947 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11948
11949 /* Make sure none of the offsets and sizes make us read beyond
11950 * the end of the firmware data */
11951 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11952 offset = be32_to_cpu(sections[i].offset);
11953 len = be32_to_cpu(sections[i].len);
11954 if (offset + len > firmware->size) {
Joe Perches7995c642010-02-17 15:01:52 +000011955 pr_err("Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011956 return -EINVAL;
11957 }
11958 }
11959
11960 /* Likewise for the init_ops offsets */
11961 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11962 ops_offsets = (u16 *)(firmware->data + offset);
11963 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11964
11965 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11966 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Joe Perches7995c642010-02-17 15:01:52 +000011967 pr_err("Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011968 return -EINVAL;
11969 }
11970 }
11971
11972 /* Check FW version */
11973 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11974 fw_ver = firmware->data + offset;
11975 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11976 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11977 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11978 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Joe Perches7995c642010-02-17 15:01:52 +000011979 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011980 fw_ver[0], fw_ver[1], fw_ver[2],
11981 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11982 BCM_5710_FW_MINOR_VERSION,
11983 BCM_5710_FW_REVISION_VERSION,
11984 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011985 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011986 }
11987
11988 return 0;
11989}
11990
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011991static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011992{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011993 const __be32 *source = (const __be32 *)_source;
11994 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011995 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011996
11997 for (i = 0; i < n/4; i++)
11998 target[i] = be32_to_cpu(source[i]);
11999}
12000
12001/*
12002 Ops array is stored in the following format:
12003 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12004 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012005static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012006{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012007 const __be32 *source = (const __be32 *)_source;
12008 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012009 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012010
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012011 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012012 tmp = be32_to_cpu(source[j]);
12013 target[i].op = (tmp >> 24) & 0xff;
12014 target[i].offset = tmp & 0xffffff;
12015 target[i].raw_data = be32_to_cpu(source[j+1]);
12016 }
12017}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012018
12019static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012020{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012021 const __be16 *source = (const __be16 *)_source;
12022 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012023 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012024
12025 for (i = 0; i < n/2; i++)
12026 target[i] = be16_to_cpu(source[i]);
12027}
12028
/* Allocate bp->arr sized per the firmware file header and fill it from
 * the corresponding firmware section through 'func' (an endianness
 * conversion helper).  Jumps to 'lbl' on allocation failure.
 * Expects 'bp' and 'fw_hdr' to be in scope at the expansion site. */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012040
/*
 * Load and unpack the chip firmware: request the E1/E1H image from
 * userspace, validate it with bnx2x_check_firmware(), byte-swap the
 * init data/ops/offsets arrays into freshly allocated buffers, and
 * point the per-STORM init pointers at the in-image sections.
 * Returns 0 or a negative errno; partially allocated arrays are freed
 * through the label chain on failure.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* chip revision selects the firmware image */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	/* sanity-check section bounds and FW version before parsing */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the firmware blob */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12108
12109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012110static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12111 const struct pci_device_id *ent)
12112{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012113 struct net_device *dev = NULL;
12114 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012115 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080012116 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012117
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012118 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012119 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012120 if (!dev) {
Joe Perches7995c642010-02-17 15:01:52 +000012121 pr_err("Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012122 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012123 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012124
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012125 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +000012126 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012127
Eilon Greensteindf4770de2009-08-12 08:23:28 +000012128 pci_set_drvdata(pdev, dev);
12129
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012130 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012131 if (rc < 0) {
12132 free_netdev(dev);
12133 return rc;
12134 }
12135
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012136 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012137 if (rc)
12138 goto init_one_exit;
12139
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012140 /* Set init arrays */
12141 rc = bnx2x_init_firmware(bp, &pdev->dev);
12142 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000012143 pr_err("Error loading firmware\n");
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012144 goto init_one_exit;
12145 }
12146
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012147 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012148 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012149 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012150 goto init_one_exit;
12151 }
12152
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012153 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Joe Perches7995c642010-02-17 15:01:52 +000012154 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12155 board_info[ent->driver_data].name,
12156 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12157 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12158 dev->base_addr, bp->pdev->irq, dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000012159
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012160 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012161
12162init_one_exit:
12163 if (bp->regview)
12164 iounmap(bp->regview);
12165
12166 if (bp->doorbells)
12167 iounmap(bp->doorbells);
12168
12169 free_netdev(dev);
12170
12171 if (atomic_read(&pdev->enable_cnt) == 1)
12172 pci_release_regions(pdev);
12173
12174 pci_disable_device(pdev);
12175 pci_set_drvdata(pdev, NULL);
12176
12177 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012178}
12179
/* Inverse of bnx2x_init_one(): tear the device down on driver unload
 * or hot-unplug.  The netdev is unregistered before any of its backing
 * resources are released.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Free the firmware init arrays allocated by bnx2x_init_firmware()
	 * and drop the firmware reference */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release BARs only on the last disable (enable_cnt refcount) */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12212
/* PM suspend hook: save PCI config space, detach the netdev, unload
 * the NIC and drop to the PCI power state requested by @state.
 * Config space is saved even when the interface is down so that
 * bnx2x_resume() can restore it unconditionally.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	/* rtnl serializes against concurrent open/close/ethtool ops */
	rtnl_lock();

	pci_save_state(pdev);

	/* Nothing more to do if the interface is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12243
/* PM resume hook: restore PCI config space, power the device back to
 * D0 and reload the NIC if the interface was running at suspend time.
 * Returns the result of bnx2x_nic_load() (0 on success).
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	/* Restore the config space saved in bnx2x_suspend() */
	pci_restore_state(pdev);

	/* Nothing more to do if the interface was down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12274
/* Error-path unload used after a PCI error (EEH): marks the device as
 * errored and releases host-side resources (timer, IRQs, SKBs, SGEs,
 * driver memory).  Unlike bnx2x_nic_unload() it does not go through
 * the regular unload flow - NOTE(review): presumably because the
 * device may be inaccessible after the bus error; confirm.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* On E1, invalidate every entry of the multicast CAM table */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
12314
/* Re-establish the driver's view of the MCP shared memory after a PCI
 * error so that firmware handshaking works again on reload: re-read
 * shmem_base, validate it, and resync the driver/MCP mailbox sequence.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no working MCP;
	 * run with NO_MCP_FLAG from here on */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resync the driver/MCP mailbox sequence number */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12344
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on a permanent failure,
 * PCI_ERS_RESULT_NEED_RESET otherwise (requesting a slot reset).
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Stop traffic before deciding whether recovery is possible */
	netif_device_detach(dev);

	/* Permanent failure - no point in requesting a slot reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12378
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success,
 * PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Restore the config space saved before the error */
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
12409
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-reads MCP shared-memory state
 * via bnx2x_eeh_recover() and reloads the NIC if it was running.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
12433
/* PCI error recovery (EEH/AER) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
12439
/* Driver registration table: probe/remove, power management and
 * error-recovery hooks */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
12449
12450static int __init bnx2x_init(void)
12451{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012452 int ret;
12453
Joe Perches7995c642010-02-17 15:01:52 +000012454 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +000012455
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012456 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12457 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +000012458 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012459 return -ENOMEM;
12460 }
12461
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012462 ret = pci_register_driver(&bnx2x_pci_driver);
12463 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +000012464 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012465 destroy_workqueue(bnx2x_wq);
12466 }
12467 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012468}
12469
/* Module exit point: unregister the driver first so no new work can be
 * queued, then destroy the slowpath workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12479
Michael Chan993ac7b2009-10-10 13:46:56 +000012480#ifdef BCM_CNIC
12481
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* spq_lock protects both the SPQ producer and the cnic kwqe ring */
	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	/* Move queued cnic kwqes onto the SPQ while there is room
	 * (bounded by max_kwqe_pending) and entries are waiting */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer with wrap-around at cnic_kwq_last */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12517
/* Queue up to @count kwqes from the cnic driver onto the local kwqe
 * ring and kick the SPQ posting logic.  Returns the number of kwqes
 * accepted, which may be less than @count if the ring fills up.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* ring full - accept only what fitted so far */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer with wrap-around at cnic_kwq_last */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* push to the SPQ right away if it has room */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12560
12561static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12562{
12563 struct cnic_ops *c_ops;
12564 int rc = 0;
12565
12566 mutex_lock(&bp->cnic_mutex);
12567 c_ops = bp->cnic_ops;
12568 if (c_ops)
12569 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12570 mutex_unlock(&bp->cnic_mutex);
12571
12572 return rc;
12573}
12574
12575static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12576{
12577 struct cnic_ops *c_ops;
12578 int rc = 0;
12579
12580 rcu_read_lock();
12581 c_ops = rcu_dereference(bp->cnic_ops);
12582 if (c_ops)
12583 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12584 rcu_read_unlock();
12585
12586 return rc;
12587}
12588
12589/*
12590 * for commands that have no data
12591 */
12592static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12593{
12594 struct cnic_ctl_info ctl = {0};
12595
12596 ctl.cmd = cmd;
12597
12598 return bnx2x_cnic_ctl_send(bp, &ctl);
12599}
12600
12601static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12602{
12603 struct cnic_ctl_info ctl;
12604
12605 /* first we tell CNIC and only then we count this as a completion */
12606 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12607 ctl.data.comp.cid = cid;
12608
12609 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12610 bnx2x_cnic_sp_post(bp, 1);
12611}
12612
/* Dispatch a control command issued by the cnic driver.  Returns 0 on
 * success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* write one context-table (ILT) entry */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* account completed slowpath entries and refill the SPQ */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* include this client in the rx-mode configuration */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* exclude this client from the rx-mode configuration */
		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
12659
12660static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12661{
12662 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12663
12664 if (bp->flags & USING_MSIX_FLAG) {
12665 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12666 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12667 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12668 } else {
12669 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12670 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12671 }
12672 cp->irq_arr[0].status_blk = bp->cnic_sb;
12673 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12674 cp->irq_arr[1].status_blk = bp->def_status_blk;
12675 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12676
12677 cp->num_irq = 2;
12678}
12679
/* Attach the cnic driver to this device: allocate the kwqe ring,
 * initialize the cnic status block, then publish @ops for the send
 * paths.  Returns 0 on success, -EINVAL/-EBUSY/-ENOMEM on failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* refuse to attach while interrupts are gated off (intr_sem held) */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: cons == prod, wrap boundary at cnic_kwq_last */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish ops last so RCU readers see fully initialized state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12717
/* Detach the cnic driver: clear the published ops pointer under the
 * mutex, wait for in-flight RCU readers to drain, then free the kwqe
 * ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait for bnx2x_cnic_ctl_send_bh() readers before freeing */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12737
12738struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12739{
12740 struct bnx2x *bp = netdev_priv(dev);
12741 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12742
12743 cp->drv_owner = THIS_MODULE;
12744 cp->chip_id = CHIP_ID(bp);
12745 cp->pdev = bp->pdev;
12746 cp->io_base = bp->regview;
12747 cp->io_base2 = bp->doorbells;
12748 cp->max_kwqe_pending = 8;
12749 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12750 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12751 cp->ctx_tbl_len = CNIC_ILT_LINES;
12752 cp->starting_cid = BCM_CNIC_CID_START;
12753 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12754 cp->drv_ctl = bnx2x_drv_ctl;
12755 cp->drv_register_cnic = bnx2x_register_cnic;
12756 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12757
12758 return cp;
12759}
12760EXPORT_SYMBOL(bnx2x_cnic_probe);
12761
12762#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012763