/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

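/* Write an arbitrarily long buffer by splitting it into chunks of at most
 * DMAE_LEN32_WR_MAX dwords per bnx2x_write_dmae() call.
 */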
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

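/* Print the bootcode (MCP) scratchpad contents around the last recorded
 * mark; does nothing when no MCP is present.
 */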
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

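/* Dump driver state for debugging: default and per-queue indices, the Rx,
 * SGE, RCQ and Tx rings of every queue, then the firmware dump and the
 * storm assert lists.
 */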
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

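/* Program the HC_CONFIG register for the active interrupt mode
 * (MSI-X, MSI or INTx) and, on E1H, the leading/trailing edge registers.
 */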
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

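/* Disable interrupt handling and wait for all ISRs and the slowpath task
 * to finish; optionally masks interrupts at the HC as well.
 */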
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

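/* Handle a slowpath (ramrod) completion taken from the RCQ: update the
 * fastpath or global driver state according to the completed command.
 */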
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

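/* INTx/MSI interrupt handler: acknowledge the status bits, schedule NAPI
 * for the fastpath queues that are flagged and queue the slowpath task for
 * the default status block.
 */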
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

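/* Acquire a hardware resource lock, retrying every 5 ms for up to 5 seconds;
 * returns 0 on success or a negative errno on failure.
 */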
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


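/* Return the current value (0 or 1) of the given GPIO pin, accounting for
 * port swapping.
 */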
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001230void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001231{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001232 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001236 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001237 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001238
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001240 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001241 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001242 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001243
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001245 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001246 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001247
Eliezer Tamirf1410642008-02-28 11:51:50 -08001248 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001250 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001251 break;
1252 }
1253}
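/* Summary of the ieee_fc -> ethtool advertising mapping implemented above:
 *
 *	PAUSE_NONE       -> clear ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *	PAUSE_BOTH       -> set   ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *	PAUSE_ASYMMETRIC -> set   ADVERTISED_Asym_Pause only
 *	anything else    -> clear both (same as PAUSE_NONE)
 */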
1254
Eilon Greenstein2691d512009-08-12 08:22:08 +00001255
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001256u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001257{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001258 if (!BP_NOMCP(bp)) {
1259 u8 rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001260
Eilon Greenstein19680c42008-08-13 15:47:33 -07001261 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001262 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */
Eilon Greenstein0c593272009-08-12 08:22:13 +00001264 if (bp->dev->mtu > 5000)
David S. Millerc0700f92008-12-16 23:53:20 -08001265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001266 else
David S. Millerc0700f92008-12-16 23:53:20 -08001267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001268
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001269 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001270
1271 if (load_mode == LOAD_DIAG)
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1273
Eilon Greenstein19680c42008-08-13 15:47:33 -07001274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001275
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001276 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001277
Eilon Greenstein3c96c682009-01-14 21:25:31 -08001278 bnx2x_calc_fc_adv(bp);
1279
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001280 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001282 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001283 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001284
Eilon Greenstein19680c42008-08-13 15:47:33 -07001285 return rc;
1286 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001287 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07001288 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001289}
1290
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001291void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001292{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001293 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001294 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001295 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001296 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001297 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001298
Eilon Greenstein19680c42008-08-13 15:47:33 -07001299 bnx2x_calc_fc_adv(bp);
1300 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001301 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001302}
1303
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001304static void bnx2x__link_reset(struct bnx2x *bp)
1305{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001306 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001307 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001308 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001309 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001310 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001311 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001312}
1313
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001314u8 bnx2x_link_test(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001315{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001316 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001317
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001318 if (!BP_NOMCP(bp)) {
1319 bnx2x_acquire_phy_lock(bp);
1320 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1321 bnx2x_release_phy_lock(bp);
1322 } else
1323 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001324
1325 return rc;
1326}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001327
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001328static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001329{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001330 u32 r_param = bp->link_vars.line_speed / 8;
1331 u32 fair_periodic_timeout_usec;
1332 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001333
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001334 memset(&(bp->cmng.rs_vars), 0,
1335 sizeof(struct rate_shaping_vars_per_port));
1336 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001337
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001338 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1339 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001340
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001341	/* this is the threshold below which no timer arming will occur;
 1342	   the 1.25 coefficient makes the threshold a little bigger
 1343	   than the real time, to compensate for timer inaccuracy */
1344 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001345 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1346
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001347 /* resolution of fairness timer */
1348 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1349 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1350 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001351
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001352 /* this is the threshold below which we won't arm the timer anymore */
1353 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001354
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001355	/* we multiply by 1e3/8 to get bytes/msec.
 1356	   We don't want the credits to exceed the credit
 1357	   corresponding to t_fair*FAIR_MEM (the algorithm resolution) */
1358 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1359 /* since each tick is 4 usec */
1360 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001361}
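/* Worked example (illustrative numbers only): on a 10G link,
 * r_param = 10000 / 8 = 1250 and, per the comment above,
 * t_fair = T_FAIR_COEF / 10000 = 1000 usec, so
 *	rs_threshold     = (RS_PERIODIC_TIMEOUT_USEC * 1250 * 5) / 4
 *	upper_bound      = 1250 * 1000 * FAIR_MEM
 *	fairness_timeout = (QM_ARB_BYTES / 1250) / 4
 * The symbolic constants are left as-is since their values live in bnx2x.h.
 */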
1362
Eilon Greenstein2691d512009-08-12 08:22:08 +00001363/* Calculates the sum of vn_min_rates.
 1364 It is needed for further normalization of the min_rates.
 1365 Returns:
 1366 sum of vn_min_rates.
 1367 or
 1368 0 - if all the min_rates are 0.
 1369 In the latter case the fairness algorithm should be deactivated.
 1370 If not all min_rates are zero, those that are zero will be set to 1.
 1371 */
1372static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1373{
1374 int all_zero = 1;
1375 int port = BP_PORT(bp);
1376 int vn;
1377
1378 bp->vn_weight_sum = 0;
1379 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1380 int func = 2*vn + port;
1381 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1382 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1383 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1384
1385 /* Skip hidden vns */
1386 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1387 continue;
1388
1389 /* If min rate is zero - set it to 1 */
1390 if (!vn_min_rate)
1391 vn_min_rate = DEF_MIN_RATE;
1392 else
1393 all_zero = 0;
1394
1395 bp->vn_weight_sum += vn_min_rate;
1396 }
1397
1398 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001399 if (all_zero) {
1400 bp->cmng.flags.cmng_enables &=
1401 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1402 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
 1403 " fairness will be disabled\n");
1404 } else
1405 bp->cmng.flags.cmng_enables |=
1406 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001407}
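/* Example (hypothetical configuration): two visible vns with MIN_BW
 * values of 25 and 75 yield vn_min_rates of 2500 and 7500, so
 * vn_weight_sum = 10000; a vn configured with MIN_BW = 0 is counted
 * with DEF_MIN_RATE instead. Only if every visible vn reports 0 is
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared and fairness left disabled.
 */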
1408
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001409static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001410{
1411 struct rate_shaping_vars_per_vn m_rs_vn;
1412 struct fairness_vars_per_vn m_fair_vn;
1413 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1414 u16 vn_min_rate, vn_max_rate;
1415 int i;
1416
1417 /* If function is hidden - set min and max to zeroes */
1418 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1419 vn_min_rate = 0;
1420 vn_max_rate = 0;
1421
1422 } else {
1423 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1424 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001425 /* If min rate is zero - set it to 1 */
1426 if (!vn_min_rate)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001427 vn_min_rate = DEF_MIN_RATE;
1428 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1429 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1430 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001431 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001432 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001433 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001434
1435 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1436 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1437
1438 /* global vn counter - maximal Mbps for this vn */
1439 m_rs_vn.vn_counter.rate = vn_max_rate;
1440
1441 /* quota - number of bytes transmitted in this period */
1442 m_rs_vn.vn_counter.quota =
1443 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1444
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001445 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001446 /* credit for each period of the fairness algorithm:
 1447 number of bytes in T_FAIR (the vns share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001448 vn_weight_sum should not be larger than 10000, thus
1449 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1450 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001451 m_fair_vn.vn_credit_delta =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001452 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1453 (8 * bp->vn_weight_sum))),
1454 (bp->cmng.fair_vars.fair_threshold * 2));
1455 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001456 m_fair_vn.vn_credit_delta);
1457 }
1458
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001459 /* Store it to internal memory */
1460 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1461 REG_WR(bp, BAR_XSTRORM_INTMEM +
1462 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1463 ((u32 *)(&m_rs_vn))[i]);
1464
1465 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1466 REG_WR(bp, BAR_XSTRORM_INTMEM +
1467 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1468 ((u32 *)(&m_fair_vn))[i]);
1469}
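/* Worked example (illustrative): with MAX_BW = 100 the vn gets
 * vn_max_rate = 10000, so its rate-shaping quota per period is
 *	(10000 * RS_PERIODIC_TIMEOUT_USEC) / 8
 * i.e. 125000 with the 100 usec period noted in bnx2x_init_port_minmax().
 * When vn_weight_sum is non-zero, the fairness credit delta is the larger
 * of vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum)) and twice
 * fair_threshold, exactly as computed above.
 */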
1470
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001471
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001472/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001473static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001474{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001475 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001476 /* Make sure that we are synced with the current statistics */
1477 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1478
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001479 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001480
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001481 if (bp->link_vars.link_up) {
1482
Eilon Greenstein1c063282009-02-12 08:36:43 +00001483 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00001484 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00001485 int port = BP_PORT(bp);
1486 u32 pause_enabled = 0;
1487
1488 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1489 pause_enabled = 1;
1490
1491 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07001492 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00001493 pause_enabled);
1494 }
1495
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001496 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1497 struct host_port_stats *pstats;
1498
1499 pstats = bnx2x_sp(bp, port_stats);
1500 /* reset old bmac stats */
1501 memset(&(pstats->mac_stx[0]), 0,
1502 sizeof(struct mac_stx));
1503 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001504 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001505 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1506 }
1507
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001508 /* indicate link status only if link status actually changed */
1509 if (prev_link_status != bp->link_vars.link_status)
1510 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001511
1512 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001513 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001514 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001515 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001516
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001517 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001518 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1519 if (vn == BP_E1HVN(bp))
1520 continue;
1521
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001522 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1524 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1525 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001526
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001527 if (bp->link_vars.link_up) {
1528 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001529
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001530 /* Init rate shaping and fairness contexts */
1531 bnx2x_init_port_minmax(bp);
1532
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001533 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001534 bnx2x_init_vn_minmax(bp, 2*vn + port);
1535
1536 /* Store it to internal memory */
1537 for (i = 0;
1538 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1539 REG_WR(bp, BAR_XSTRORM_INTMEM +
1540 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1541 ((u32 *)(&bp->cmng))[i]);
1542 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001543 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001544}
1545
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001547{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001548 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001549 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001550
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001551 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1552
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001553 if (bp->link_vars.link_up)
1554 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1555 else
1556 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1557
Eilon Greenstein2691d512009-08-12 08:22:08 +00001558 bnx2x_calc_vn_weight_sum(bp);
1559
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001560 /* indicate link status */
1561 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001562}
1563
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001564static void bnx2x_pmf_update(struct bnx2x *bp)
1565{
1566 int port = BP_PORT(bp);
1567 u32 val;
1568
1569 bp->port.pmf = 1;
1570 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1571
1572 /* enable nig attention */
1573 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1574 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001576
1577 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001578}
1579
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001580/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001581
1582/* slow path */
1583
1584/*
1585 * General service functions
1586 */
1587
Eilon Greenstein2691d512009-08-12 08:22:08 +00001588/* send the MCP a request, block until there is a reply */
1589u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1590{
1591 int func = BP_FUNC(bp);
1592 u32 seq = ++bp->fw_seq;
1593 u32 rc = 0;
1594 u32 cnt = 1;
1595 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1596
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07001597 mutex_lock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00001598 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1599 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1600
1601 do {
 1602 /* let the FW do its magic ... */
1603 msleep(delay);
1604
1605 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1606
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07001607 /* Give the FW up to 5 seconds (500*10ms) */
1608 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00001609
1610 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1611 cnt*delay, rc, seq);
1612
1613 /* is this a reply to our command? */
1614 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1615 rc &= FW_MSG_CODE_MASK;
1616 else {
1617 /* FW BUG! */
1618 BNX2X_ERR("FW failed to respond!\n");
1619 bnx2x_fw_dump(bp);
1620 rc = 0;
1621 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07001622 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00001623
1624 return rc;
1625}
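/* Illustrative call pattern (the actual callers live elsewhere in this
 * file), e.g. acknowledging a DCC event back to the MCP:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * The low FW_MSG_SEQ_NUMBER_MASK bits carry the sequence number used to
 * match the reply; only the FW_MSG_CODE_MASK part is returned to the
 * caller, or 0 if the MCP never answered.
 */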
1626
Eilon Greenstein2691d512009-08-12 08:22:08 +00001627static void bnx2x_e1h_disable(struct bnx2x *bp)
1628{
1629 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00001630
1631 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00001632
1633 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1634
Eilon Greenstein2691d512009-08-12 08:22:08 +00001635 netif_carrier_off(bp->dev);
1636}
1637
1638static void bnx2x_e1h_enable(struct bnx2x *bp)
1639{
1640 int port = BP_PORT(bp);
1641
1642 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1643
Eilon Greenstein2691d512009-08-12 08:22:08 +00001644 /* Only the Tx queues should be re-enabled */
1645 netif_tx_wake_all_queues(bp->dev);
1646
Eilon Greenstein061bc702009-10-15 00:18:47 -07001647 /*
 1648 * Do not call netif_carrier_on here; it will be called when the link
 1649 * state is checked and the link is found to be up
1650 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00001651}
1652
1653static void bnx2x_update_min_max(struct bnx2x *bp)
1654{
1655 int port = BP_PORT(bp);
1656 int vn, i;
1657
1658 /* Init rate shaping and fairness contexts */
1659 bnx2x_init_port_minmax(bp);
1660
1661 bnx2x_calc_vn_weight_sum(bp);
1662
1663 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664 bnx2x_init_vn_minmax(bp, 2*vn + port);
1665
1666 if (bp->port.pmf) {
1667 int func;
1668
1669 /* Set the attention towards other drivers on the same port */
1670 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671 if (vn == BP_E1HVN(bp))
1672 continue;
1673
1674 func = ((vn << 1) | port);
1675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1677 }
1678
1679 /* Store it to internal memory */
1680 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681 REG_WR(bp, BAR_XSTRORM_INTMEM +
1682 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683 ((u32 *)(&bp->cmng))[i]);
1684 }
1685}
1686
1687static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1688{
Eilon Greenstein2691d512009-08-12 08:22:08 +00001689 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00001690
1691 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1692
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001693 /*
1694 * This is the only place besides the function initialization
 1695 * where bp->flags can change, so it is done without any
 1696 * locks
1697 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00001698 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001700 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001701
1702 bnx2x_e1h_disable(bp);
1703 } else {
1704 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001705 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001706
1707 bnx2x_e1h_enable(bp);
1708 }
1709 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1710 }
1711 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1712
1713 bnx2x_update_min_max(bp);
1714 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1715 }
1716
1717 /* Report results to MCP */
1718 if (dcc_event)
1719 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1720 else
1721 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1722}
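/* A sketch of the DCC handshake implemented above: each handled bit
 * (DRV_STATUS_DCC_DISABLE_ENABLE_PF, DRV_STATUS_DCC_BANDWIDTH_ALLOCATION)
 * is cleared from dcc_event; if any unhandled bits remain the driver
 * answers the MCP with DRV_MSG_CODE_DCC_FAILURE, otherwise with
 * DRV_MSG_CODE_DCC_OK.
 */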
1723
Michael Chan28912902009-10-10 13:46:53 +00001724/* must be called under the spq lock */
1725static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1726{
1727 struct eth_spe *next_spe = bp->spq_prod_bd;
1728
1729 if (bp->spq_prod_bd == bp->spq_last_bd) {
1730 bp->spq_prod_bd = bp->spq;
1731 bp->spq_prod_idx = 0;
1732 DP(NETIF_MSG_TIMER, "end of spq\n");
1733 } else {
1734 bp->spq_prod_bd++;
1735 bp->spq_prod_idx++;
1736 }
1737 return next_spe;
1738}
1739
1740/* must be called under the spq lock */
1741static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1742{
1743 int func = BP_FUNC(bp);
1744
1745 /* Make sure that BD data is updated before writing the producer */
1746 wmb();
1747
1748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1749 bp->spq_prod_idx);
1750 mmiowb();
1751}
1752
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001753/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001754int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001755 u32 data_hi, u32 data_lo, int common)
1756{
Michael Chan28912902009-10-10 13:46:53 +00001757 struct eth_spe *spe;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001758
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001759#ifdef BNX2X_STOP_ON_ERROR
1760 if (unlikely(bp->panic))
1761 return -EIO;
1762#endif
1763
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001764 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001765
1766 if (!bp->spq_left) {
1767 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001768 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001769 bnx2x_panic();
1770 return -EBUSY;
1771 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08001772
Michael Chan28912902009-10-10 13:46:53 +00001773 spe = bnx2x_sp_get_next(bp);
1774
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001775 /* CID needs the port number to be encoded in it */
Michael Chan28912902009-10-10 13:46:53 +00001776 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001777 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1778 HW_CID(bp, cid));
Michael Chan28912902009-10-10 13:46:53 +00001779 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001780 if (common)
Michael Chan28912902009-10-10 13:46:53 +00001781 spe->hdr.type |=
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001782 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1783
Michael Chan28912902009-10-10 13:46:53 +00001784 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001786
1787 bp->spq_left--;
1788
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792 (u32)(U64_LO(bp->spq_mapping) +
1793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1795
Michael Chan28912902009-10-10 13:46:53 +00001796 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001797 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001798 return 0;
1799}
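/* Sketch of what a queued slow-path element (SPE) ends up containing,
 * based on the assignments above: conn_and_cmd_data packs the ramrod
 * command into the SPE_HDR_CMD_ID bits together with HW_CID(bp, cid),
 * hdr.type carries ETH_CONNECTION_TYPE plus the COMMON_RAMROD bit when
 * "common" is set, and data_hi/data_lo land in mac_config_addr.
 * Completions for these entries arrive on the fastpath ring, as noted
 * above.
 */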
1800
1801/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001802static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001803{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001804 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001805 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001806
1807 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001808 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001809 val = (1UL << 31);
1810 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812 if (val & (1L << 31))
1813 break;
1814
1815 msleep(5);
1816 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001817 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07001818 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001819 rc = -EBUSY;
1820 }
1821
1822 return rc;
1823}
1824
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001825/* release split MCP access lock register */
1826static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001827{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001828 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001829}
1830
1831static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1832{
1833 struct host_def_status_block *def_sb = bp->def_status_blk;
1834 u16 rc = 0;
1835
1836 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001837 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1839 rc |= 1;
1840 }
1841 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1843 rc |= 2;
1844 }
1845 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1847 rc |= 4;
1848 }
1849 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1851 rc |= 8;
1852 }
1853 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1855 rc |= 16;
1856 }
1857 return rc;
1858}
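/* The returned bitmask encodes which default-status-block indices moved:
 * bit 0 - attention bits index, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM. bnx2x_sp_task() below only inspects
 * bits 0 (attentions) and 1 (CSTORM STAT_QUERY) explicitly.
 */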
1859
1860/*
1861 * slow path service functions
1862 */
1863
1864static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1865{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001866 int port = BP_PORT(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07001867 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868 COMMAND_REG_ATTN_BITS_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001869 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001871 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001873 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00001874 u32 nig_mask = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001875
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001876 if (bp->attn_state & asserted)
1877 BNX2X_ERR("IGU ERROR\n");
1878
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880 aeu_mask = REG_RD(bp, aeu_addr);
1881
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001883 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001884 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001886
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001887 REG_WR(bp, aeu_addr, aeu_mask);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001889
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001891 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07001892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001893
1894 if (asserted & ATTN_HARD_WIRED_MASK) {
1895 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001896
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08001897 bnx2x_acquire_phy_lock(bp);
1898
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001899 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00001900 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001901 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001902
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001903 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001904
1905 /* handle unicore attn? */
1906 }
1907 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1909
1910 if (asserted & GPIO_2_FUNC)
1911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1912
1913 if (asserted & GPIO_3_FUNC)
1914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1915
1916 if (asserted & GPIO_4_FUNC)
1917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1918
1919 if (port == 0) {
1920 if (asserted & ATTN_GENERAL_ATTN_1) {
1921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1923 }
1924 if (asserted & ATTN_GENERAL_ATTN_2) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1927 }
1928 if (asserted & ATTN_GENERAL_ATTN_3) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1931 }
1932 } else {
1933 if (asserted & ATTN_GENERAL_ATTN_4) {
1934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1936 }
1937 if (asserted & ATTN_GENERAL_ATTN_5) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1940 }
1941 if (asserted & ATTN_GENERAL_ATTN_6) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1944 }
1945 }
1946
1947 } /* if hardwired */
1948
Eilon Greenstein5c862842008-08-13 15:51:48 -07001949 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1950 asserted, hc_addr);
1951 REG_WR(bp, hc_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001952
1953 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08001954 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00001955 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08001956 bnx2x_release_phy_lock(bp);
1957 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001958}
1959
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001960static inline void bnx2x_fan_failure(struct bnx2x *bp)
1961{
1962 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00001963 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001964 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00001965 ext_phy_config =
1966 SHMEM_RD(bp,
1967 dev_info.port_hw_config[port].external_phy_config);
1968
1969 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1970 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001971 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00001972 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001973
1974 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001975 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
 1976 " the driver to shut down the card to prevent permanent"
1977 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001978}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001979
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001980static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1981{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001982 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001983 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00001984 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001985
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001986 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1987 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001988
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001989 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08001990
1991 val = REG_RD(bp, reg_offset);
1992 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1993 REG_WR(bp, reg_offset, val);
1994
1995 BNX2X_ERR("SPIO5 hw attention\n");
1996
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001997 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00001998 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00001999 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002000 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002001
Eilon Greenstein589abe32009-02-12 08:36:55 +00002002 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2003 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2004 bnx2x_acquire_phy_lock(bp);
2005 bnx2x_handle_module_detect_int(&bp->link_params);
2006 bnx2x_release_phy_lock(bp);
2007 }
2008
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002009 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2010
2011 val = REG_RD(bp, reg_offset);
2012 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2013 REG_WR(bp, reg_offset, val);
2014
2015 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002016 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002017 bnx2x_panic();
2018 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002019}
2020
2021static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2022{
2023 u32 val;
2024
Eilon Greenstein0626b892009-02-12 08:38:14 +00002025 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002026
2027 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2028 BNX2X_ERR("DB hw attention 0x%x\n", val);
2029 /* DORQ discard attention */
2030 if (val & 0x2)
2031 BNX2X_ERR("FATAL error from DORQ\n");
2032 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002033
2034 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2035
2036 int port = BP_PORT(bp);
2037 int reg_offset;
2038
2039 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2040 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2041
2042 val = REG_RD(bp, reg_offset);
2043 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2044 REG_WR(bp, reg_offset, val);
2045
2046 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002047 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002048 bnx2x_panic();
2049 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002050}
2051
2052static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2053{
2054 u32 val;
2055
2056 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2057
2058 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2059 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2060 /* CFC error attention */
2061 if (val & 0x2)
2062 BNX2X_ERR("FATAL error from CFC\n");
2063 }
2064
2065 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2066
2067 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2068 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2069 /* RQ_USDMDP_FIFO_OVERFLOW */
2070 if (val & 0x18000)
2071 BNX2X_ERR("FATAL error from PXP\n");
2072 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002073
2074 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2075
2076 int port = BP_PORT(bp);
2077 int reg_offset;
2078
2079 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2080 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2081
2082 val = REG_RD(bp, reg_offset);
2083 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2084 REG_WR(bp, reg_offset, val);
2085
2086 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002087 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002088 bnx2x_panic();
2089 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002090}
2091
2092static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2093{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002094 u32 val;
2095
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002096 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2097
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002098 if (attn & BNX2X_PMF_LINK_ASSERT) {
2099 int func = BP_FUNC(bp);
2100
2101 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002102 bp->mf_config = SHMEM_RD(bp,
2103 mf_cfg.func_mf_config[func].config);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002104 val = SHMEM_RD(bp, func_mb[func].drv_status);
2105 if (val & DRV_STATUS_DCC_EVENT_MASK)
2106 bnx2x_dcc_event(bp,
2107 (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002108 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002109 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002110 bnx2x_pmf_update(bp);
2111
2112 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002113
2114 BNX2X_ERR("MC assert!\n");
2115 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2116 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2117 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2119 bnx2x_panic();
2120
2121 } else if (attn & BNX2X_MCP_ASSERT) {
2122
2123 BNX2X_ERR("MCP assert!\n");
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002126
2127 } else
2128 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2129 }
2130
2131 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002132 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2133 if (attn & BNX2X_GRC_TIMEOUT) {
2134 val = CHIP_IS_E1H(bp) ?
2135 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2136 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2137 }
2138 if (attn & BNX2X_GRC_RSV) {
2139 val = CHIP_IS_E1H(bp) ?
2140 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2141 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2142 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002143 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002144 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002145}
2146
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002147#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2148#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2149#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2150#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2151#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2152#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
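/* Layout of BNX2X_MISC_GEN_REG implied by the masks above: bits 0..15
 * hold the load counter manipulated by bnx2x_inc_load_cnt() and
 * bnx2x_dec_load_cnt() below, and the remaining bits form
 * RESET_DONE_FLAG_MASK; bit 16 is set while a recovery reset is in
 * progress and cleared when it completes, which is what
 * bnx2x_reset_is_done() tests for.
 */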
2153/*
2154 * should be run under rtnl lock
2155 */
2156static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2157{
2158 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2159 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2160 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2161 barrier();
2162 mmiowb();
2163}
2164
2165/*
2166 * should be run under rtnl lock
2167 */
2168static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2169{
2170 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 2171 val |= (1 << RESET_DONE_FLAG_SHIFT);
2172 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2173 barrier();
2174 mmiowb();
2175}
2176
2177/*
2178 * should be run under rtnl lock
2179 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002180bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002181{
2182 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2183 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
 2184 return !(val & RESET_DONE_FLAG_MASK);
2185}
2186
2187/*
2188 * should be run under rtnl lock
2189 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002190inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002191{
2192 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2193
2194 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2195
2196 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2197 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2198 barrier();
2199 mmiowb();
2200}
2201
2202/*
2203 * should be run under rtnl lock
2204 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002205u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002206{
2207 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2208
2209 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2210
2211 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2212 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2213 barrier();
2214 mmiowb();
2215
2216 return val1;
2217}
2218
2219/*
2220 * should be run under rtnl lock
2221 */
2222static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2223{
2224 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2225}
2226
2227static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2228{
2229 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2230 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2231}
2232
2233static inline void _print_next_block(int idx, const char *blk)
2234{
2235 if (idx)
2236 pr_cont(", ");
2237 pr_cont("%s", blk);
2238}
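/* The parity reporting below builds a single line such as
 * "eth0: Parity errors detected in blocks: BRB, PARSER" by chaining
 * _print_next_block() calls; the interface and block names here are
 * hypothetical, the real ones depend on which AEU parity bits are set.
 */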
2239
2240static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2241{
2242 int i = 0;
2243 u32 cur_bit = 0;
2244 for (i = 0; sig; i++) {
2245 cur_bit = ((u32)0x1 << i);
2246 if (sig & cur_bit) {
2247 switch (cur_bit) {
2248 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2249 _print_next_block(par_num++, "BRB");
2250 break;
2251 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2252 _print_next_block(par_num++, "PARSER");
2253 break;
2254 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2255 _print_next_block(par_num++, "TSDM");
2256 break;
2257 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2258 _print_next_block(par_num++, "SEARCHER");
2259 break;
2260 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2261 _print_next_block(par_num++, "TSEMI");
2262 break;
2263 }
2264
2265 /* Clear the bit */
2266 sig &= ~cur_bit;
2267 }
2268 }
2269
2270 return par_num;
2271}
2272
2273static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2274{
2275 int i = 0;
2276 u32 cur_bit = 0;
2277 for (i = 0; sig; i++) {
2278 cur_bit = ((u32)0x1 << i);
2279 if (sig & cur_bit) {
2280 switch (cur_bit) {
2281 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2282 _print_next_block(par_num++, "PBCLIENT");
2283 break;
2284 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2285 _print_next_block(par_num++, "QM");
2286 break;
2287 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2288 _print_next_block(par_num++, "XSDM");
2289 break;
2290 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2291 _print_next_block(par_num++, "XSEMI");
2292 break;
2293 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2294 _print_next_block(par_num++, "DOORBELLQ");
2295 break;
2296 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2297 _print_next_block(par_num++, "VAUX PCI CORE");
2298 break;
2299 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2300 _print_next_block(par_num++, "DEBUG");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2303 _print_next_block(par_num++, "USDM");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2306 _print_next_block(par_num++, "USEMI");
2307 break;
2308 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2309 _print_next_block(par_num++, "UPB");
2310 break;
2311 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2312 _print_next_block(par_num++, "CSDM");
2313 break;
2314 }
2315
2316 /* Clear the bit */
2317 sig &= ~cur_bit;
2318 }
2319 }
2320
2321 return par_num;
2322}
2323
2324static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2325{
2326 int i = 0;
2327 u32 cur_bit = 0;
2328 for (i = 0; sig; i++) {
2329 cur_bit = ((u32)0x1 << i);
2330 if (sig & cur_bit) {
2331 switch (cur_bit) {
2332 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2333 _print_next_block(par_num++, "CSEMI");
2334 break;
2335 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2336 _print_next_block(par_num++, "PXP");
2337 break;
2338 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2339 _print_next_block(par_num++,
2340 "PXPPCICLOCKCLIENT");
2341 break;
2342 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2343 _print_next_block(par_num++, "CFC");
2344 break;
2345 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2346 _print_next_block(par_num++, "CDU");
2347 break;
2348 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2349 _print_next_block(par_num++, "IGU");
2350 break;
2351 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2352 _print_next_block(par_num++, "MISC");
2353 break;
2354 }
2355
2356 /* Clear the bit */
2357 sig &= ~cur_bit;
2358 }
2359 }
2360
2361 return par_num;
2362}
2363
2364static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2365{
2366 int i = 0;
2367 u32 cur_bit = 0;
2368 for (i = 0; sig; i++) {
2369 cur_bit = ((u32)0x1 << i);
2370 if (sig & cur_bit) {
2371 switch (cur_bit) {
2372 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2373 _print_next_block(par_num++, "MCP ROM");
2374 break;
2375 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2376 _print_next_block(par_num++, "MCP UMP RX");
2377 break;
2378 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2379 _print_next_block(par_num++, "MCP UMP TX");
2380 break;
2381 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2382 _print_next_block(par_num++, "MCP SCPAD");
2383 break;
2384 }
2385
2386 /* Clear the bit */
2387 sig &= ~cur_bit;
2388 }
2389 }
2390
2391 return par_num;
2392}
2393
2394static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2395 u32 sig2, u32 sig3)
2396{
2397 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2398 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2399 int par_num = 0;
2400 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2401 "[0]:0x%08x [1]:0x%08x "
2402 "[2]:0x%08x [3]:0x%08x\n",
2403 sig0 & HW_PRTY_ASSERT_SET_0,
2404 sig1 & HW_PRTY_ASSERT_SET_1,
2405 sig2 & HW_PRTY_ASSERT_SET_2,
2406 sig3 & HW_PRTY_ASSERT_SET_3);
2407 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2408 bp->dev->name);
2409 par_num = bnx2x_print_blocks_with_parity0(
2410 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2411 par_num = bnx2x_print_blocks_with_parity1(
2412 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2413 par_num = bnx2x_print_blocks_with_parity2(
2414 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2415 par_num = bnx2x_print_blocks_with_parity3(
2416 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2417 printk("\n");
2418 return true;
2419 } else
2420 return false;
2421}
2422
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002423bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002424{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002425 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002426 int port = BP_PORT(bp);
2427
2428 attn.sig[0] = REG_RD(bp,
2429 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2430 port*4);
2431 attn.sig[1] = REG_RD(bp,
2432 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2433 port*4);
2434 attn.sig[2] = REG_RD(bp,
2435 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2436 port*4);
2437 attn.sig[3] = REG_RD(bp,
2438 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2439 port*4);
2440
2441 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2442 attn.sig[3]);
2443}
2444
2445static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2446{
2447 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002448 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002449 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002450 u32 reg_addr;
2451 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002452 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002453
2454 /* need to take HW lock because MCP or other port might also
2455 try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002456 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002457
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002458 if (bnx2x_chk_parity_attn(bp)) {
2459 bp->recovery_state = BNX2X_RECOVERY_INIT;
2460 bnx2x_set_reset_in_progress(bp);
2461 schedule_delayed_work(&bp->reset_task, 0);
2462 /* Disable HW interrupts */
2463 bnx2x_int_disable(bp);
2464 bnx2x_release_alr(bp);
2465 /* In case of parity errors don't handle attentions so that
 2466 * other functions can also "see" the parity errors.
2467 */
2468 return;
2469 }
2470
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002471 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2472 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2473 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2474 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002475 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2476 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002477
2478 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2479 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002480 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002481
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002482 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002483 index, group_mask->sig[0], group_mask->sig[1],
2484 group_mask->sig[2], group_mask->sig[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002485
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002486 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002487 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002488 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002489 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002490 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002491 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002492 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002493 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002494 }
2495 }
2496
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002497 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002498
Eilon Greenstein5c862842008-08-13 15:51:48 -07002499 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002500
2501 val = ~deasserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002502 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2503 val, reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07002504 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002506 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002507 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002508
2509 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2510 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2511
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2513 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002514
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002515 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2516 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002517 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2519
2520 REG_WR(bp, reg_addr, aeu_mask);
2521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002522
2523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2524 bp->attn_state &= ~deasserted;
2525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2526}
2527
2528static void bnx2x_attn_int(struct bnx2x *bp)
2529{
2530 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08002531 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2532 attn_bits);
2533 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2534 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002535 u32 attn_state = bp->attn_state;
2536
2537 /* look for changed bits */
2538 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2539 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2540
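	/* A sketch of the per-bit logic above: a bit that is newly set in
	 * attn_bits but not yet reflected in attn_ack or attn_state is
	 * treated as asserted, while a bit that has dropped from attn_bits
	 * but is still present in both attn_ack and attn_state is treated
	 * as deasserted. The check below flags the inconsistent combination
	 * attn_bits == attn_ack with attn_bits != attn_state as a BAD state.
	 */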
2541 DP(NETIF_MSG_HW,
2542 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2543 attn_bits, attn_ack, asserted, deasserted);
2544
2545 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002546 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002547
2548 /* handle bits that were raised */
2549 if (asserted)
2550 bnx2x_attn_int_asserted(bp, asserted);
2551
2552 if (deasserted)
2553 bnx2x_attn_int_deasserted(bp, deasserted);
2554}
2555
2556static void bnx2x_sp_task(struct work_struct *work)
2557{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08002558 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002559 u16 status;
2560
2561 /* Return here if interrupt is disabled */
2562 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07002563 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002564 return;
2565 }
2566
2567 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002568/* if (status == 0) */
2569/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002570
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002571 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002572
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002573 /* HW attentions */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002574 if (status & 0x1) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002575 bnx2x_attn_int(bp);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002576 status &= ~0x1;
2577 }
2578
2579 /* CStorm events: STAT_QUERY */
2580 if (status & 0x2) {
2581 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2582 status &= ~0x2;
2583 }
2584
2585 if (unlikely(status))
2586 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2587 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002588
Eilon Greenstein68d59482009-01-14 21:27:36 -08002589 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002590 IGU_INT_NOP, 1);
2591 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2592 IGU_INT_NOP, 1);
2593 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2594 IGU_INT_NOP, 1);
2595 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2596 IGU_INT_NOP, 1);
2597 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2598 IGU_INT_ENABLE, 1);
2599}
2600
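/*
 * MSI-X slow path interrupt handler: masks further slow path interrupts via
 * an IGU ack, gives a registered CNIC driver a chance to handle the event,
 * and defers the remaining work to the slow path task.
 */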
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002601irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002602{
2603 struct net_device *dev = dev_instance;
2604 struct bnx2x *bp = netdev_priv(dev);
2605
2606 /* Return here if interrupt is disabled */
2607 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07002608 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002609 return IRQ_HANDLED;
2610 }
2611
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08002612 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002613
2614#ifdef BNX2X_STOP_ON_ERROR
2615 if (unlikely(bp->panic))
2616 return IRQ_HANDLED;
2617#endif
2618
Michael Chan993ac7b2009-10-10 13:46:56 +00002619#ifdef BCM_CNIC
2620 {
2621 struct cnic_ops *c_ops;
2622
2623 rcu_read_lock();
2624 c_ops = rcu_dereference(bp->cnic_ops);
2625 if (c_ops)
2626 c_ops->cnic_handler(bp->cnic_data, NULL);
2627 rcu_read_unlock();
2628 }
2629#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08002630 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002631
2632 return IRQ_HANDLED;
2633}
2634
2635/* end of slow path */
2636
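/*
 * Periodic driver timer: in poll mode it services queue 0 Tx/Rx, maintains
 * the driver<->MCP heartbeat pulse sequence, requests a statistics update
 * while the device is open, and then re-arms itself.
 */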
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002637static void bnx2x_timer(unsigned long data)
2638{
2639 struct bnx2x *bp = (struct bnx2x *) data;
2640
2641 if (!netif_running(bp->dev))
2642 return;
2643
2644 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08002645 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002646
2647 if (poll) {
2648 struct bnx2x_fastpath *fp = &bp->fp[0];
2649 int rc;
2650
Eilon Greenstein7961f792009-03-02 07:59:31 +00002651 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002652 rc = bnx2x_rx_int(fp, 1000);
2653 }
2654
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002655 if (!BP_NOMCP(bp)) {
2656 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002657 u32 drv_pulse;
2658 u32 mcp_pulse;
2659
2660 ++bp->fw_drv_pulse_wr_seq;
2661 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2662 /* TBD - add SYSTEM_TIME */
2663 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002664 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002665
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002666 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002667 MCP_PULSE_SEQ_MASK);
2668 /* The delta between driver pulse and mcp response
2669 * should be 1 (before mcp response) or 0 (after mcp response)
2670 */
2671 if ((drv_pulse != mcp_pulse) &&
2672 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2673 /* someone lost a heartbeat... */
2674 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2675 drv_pulse, mcp_pulse);
2676 }
2677 }
2678
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002679 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002680 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002681
Eliezer Tamirf1410642008-02-28 11:51:50 -08002682timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002683 mod_timer(&bp->timer, jiffies + bp->current_interval);
2684}
2685
2686/* end of Statistics */
2687
2688/* nic init */
2689
2690/*
2691 * nic init service functions
2692 */
2693
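/* Clear the U and C host status block areas of sb_id in CSTORM fast memory */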
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002694static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002695{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002696 int port = BP_PORT(bp);
2697
Eilon Greensteinca003922009-08-12 22:53:28 -07002698 /* "CSTORM" */
2699 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2700 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2701 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2703 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2704 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002705}
2706
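/*
 * Program a fastpath status block: write its host DMA address and owning
 * function into the USTORM and CSTORM sections, start with host coalescing
 * disabled on every index, and enable the status block in the IGU.
 */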
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002707void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
Eilon Greenstein5c862842008-08-13 15:51:48 -07002708 dma_addr_t mapping, int sb_id)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002709{
2710 int port = BP_PORT(bp);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002711 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002712 int index;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002713 u64 section;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002714
2715 /* USTORM */
2716 section = ((u64)mapping) + offsetof(struct host_status_block,
2717 u_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002718 sb->u_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002719
Eilon Greensteinca003922009-08-12 22:53:28 -07002720 REG_WR(bp, BAR_CSTRORM_INTMEM +
2721 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2722 REG_WR(bp, BAR_CSTRORM_INTMEM +
2723 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002724 U64_HI(section));
Eilon Greensteinca003922009-08-12 22:53:28 -07002725 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2726 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002727
2728 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
Eilon Greensteinca003922009-08-12 22:53:28 -07002729 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2730 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731
2732 /* CSTORM */
2733 section = ((u64)mapping) + offsetof(struct host_status_block,
2734 c_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002735 sb->c_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002736
2737 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002738 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002739 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002740 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002741 U64_HI(section));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07002742 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
Eilon Greensteinca003922009-08-12 22:53:28 -07002743 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002744
2745 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2746 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002747 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002749 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2750}
2751
2752static void bnx2x_zero_def_sb(struct bnx2x *bp)
2753{
2754 int func = BP_FUNC(bp);
2755
Eilon Greensteinca003922009-08-12 22:53:28 -07002756 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002757 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2758 sizeof(struct tstorm_def_status_block)/4);
Eilon Greensteinca003922009-08-12 22:53:28 -07002759 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2760 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2761 sizeof(struct cstorm_def_status_block_u)/4);
2762 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2763 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2764 sizeof(struct cstorm_def_status_block_c)/4);
2765 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
Eilon Greenstein490c3c92009-03-02 07:59:52 +00002766 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2767 sizeof(struct xstorm_def_status_block)/4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002768}
2769
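/*
 * Program the default status block: latch the AEU attention group signals,
 * point the HC attention message registers at the ATTN section and set up
 * the USTORM/CSTORM/TSTORM/XSTORM default sections for this function.
 */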
2770static void bnx2x_init_def_sb(struct bnx2x *bp,
2771 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002772 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002773{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002774 int port = BP_PORT(bp);
2775 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002776 int index, val, reg_offset;
2777 u64 section;
2778
2779 /* ATTN */
2780 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2781 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002782 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002783
Eliezer Tamir49d66772008-02-28 11:53:13 -08002784 bp->attn_state = 0;
2785
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002786 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2787 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2788
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002789 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002790 bp->attn_group[index].sig[0] = REG_RD(bp,
2791 reg_offset + 0x10*index);
2792 bp->attn_group[index].sig[1] = REG_RD(bp,
2793 reg_offset + 0x4 + 0x10*index);
2794 bp->attn_group[index].sig[2] = REG_RD(bp,
2795 reg_offset + 0x8 + 0x10*index);
2796 bp->attn_group[index].sig[3] = REG_RD(bp,
2797 reg_offset + 0xc + 0x10*index);
2798 }
2799
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002800 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2801 HC_REG_ATTN_MSG0_ADDR_L);
2802
2803 REG_WR(bp, reg_offset, U64_LO(section));
2804 REG_WR(bp, reg_offset + 4, U64_HI(section));
2805
2806 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2807
2808 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002809 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002810 REG_WR(bp, reg_offset, val);
2811
2812 /* USTORM */
2813 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2814 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002815 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002816
Eilon Greensteinca003922009-08-12 22:53:28 -07002817 REG_WR(bp, BAR_CSTRORM_INTMEM +
2818 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2819 REG_WR(bp, BAR_CSTRORM_INTMEM +
2820 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002821 U64_HI(section));
Eilon Greensteinca003922009-08-12 22:53:28 -07002822 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2823 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002824
2825 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
Eilon Greensteinca003922009-08-12 22:53:28 -07002826 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2827 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002828
2829 /* CSTORM */
2830 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2831 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002832 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002833
2834 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002835 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002836 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002837 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002838 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07002839 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greensteinca003922009-08-12 22:53:28 -07002840 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002841
2842 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2843 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002844 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002845
2846 /* TSTORM */
2847 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2848 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002849 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002850
2851 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002852 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002853 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002854 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002855 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07002856 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002857 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002858
2859 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2860 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002861 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002862
2863 /* XSTORM */
2864 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2865 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002866 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002867
2868 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002869 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002870 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002871 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002872 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07002873 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002874 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002875
2876 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2877 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002878 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002879
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002880 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07002881 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002882
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002883 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002884}
2885
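/*
 * Write the Rx/Tx CQ coalescing timeouts (in units of 4*BNX2X_BTR) for each
 * queue; a zero tick value disables host coalescing on that index.
 */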
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002886void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002887{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002888 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002889 int i;
2890
2891 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002892 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002893
2894 /* HC_INDEX_U_ETH_RX_CQ_CONS */
Eilon Greensteinca003922009-08-12 22:53:28 -07002895 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2896 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2897 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00002898 bp->rx_ticks/(4 * BNX2X_BTR));
Eilon Greensteinca003922009-08-12 22:53:28 -07002899 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2900 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2901 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00002902 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002903
2904 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2905 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002906 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2907 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00002908 bp->tx_ticks/(4 * BNX2X_BTR));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002909 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002910 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2911 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00002912 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002913 }
2914}
2915
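/* Initialize the slow path queue (SPQ) and publish its page base to XSTORM */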
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002916static void bnx2x_init_sp_ring(struct bnx2x *bp)
2917{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002918 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002919
2920 spin_lock_init(&bp->spq_lock);
2921
2922 bp->spq_left = MAX_SPQ_PENDING;
2923 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002924 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2925 bp->spq_prod_bd = bp->spq;
2926 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2927
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002928 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002929 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002930 REG_WR(bp,
2931 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002932 U64_HI(bp->spq_mapping));
2933
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002934 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002935 bp->spq_prod_idx);
2936}
2937
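/*
 * Fill the per-queue ETH contexts: the Rx (USTORM) side gets buffer sizes,
 * BD/SGE page bases, optional TPA settings and CDU values; the Tx side gets
 * its status block binding, BD page base and statistics configuration.
 */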
2938static void bnx2x_init_context(struct bnx2x *bp)
2939{
2940 int i;
2941
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00002942 /* Rx */
2943 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002944 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2945 struct bnx2x_fastpath *fp = &bp->fp[i];
Eilon Greensteinde832a52009-02-12 08:36:33 +00002946 u8 cl_id = fp->cl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002947
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002948 context->ustorm_st_context.common.sb_index_numbers =
2949 BNX2X_RX_SB_INDEX_NUM;
Eilon Greenstein0626b892009-02-12 08:38:14 +00002950 context->ustorm_st_context.common.clientId = cl_id;
Eilon Greensteinca003922009-08-12 22:53:28 -07002951 context->ustorm_st_context.common.status_block_id = fp->sb_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002952 context->ustorm_st_context.common.flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00002953 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2954 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2955 context->ustorm_st_context.common.statistics_counter_id =
2956 cl_id;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08002957 context->ustorm_st_context.common.mc_alignment_log_size =
Eilon Greenstein0f008462009-02-12 08:36:18 +00002958 BNX2X_RX_ALIGN_SHIFT;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002959 context->ustorm_st_context.common.bd_buff_size =
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07002960 bp->rx_buf_size;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002961 context->ustorm_st_context.common.bd_page_base_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002962 U64_HI(fp->rx_desc_mapping);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002963 context->ustorm_st_context.common.bd_page_base_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002964 U64_LO(fp->rx_desc_mapping);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07002965 if (!fp->disable_tpa) {
2966 context->ustorm_st_context.common.flags |=
Eilon Greensteinca003922009-08-12 22:53:28 -07002967 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07002968 context->ustorm_st_context.common.sge_buff_size =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002969 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2970 0xffff);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07002971 context->ustorm_st_context.common.sge_page_base_hi =
2972 U64_HI(fp->rx_sge_mapping);
2973 context->ustorm_st_context.common.sge_page_base_lo =
2974 U64_LO(fp->rx_sge_mapping);
Eilon Greensteinca003922009-08-12 22:53:28 -07002975
2976 context->ustorm_st_context.common.max_sges_for_packet =
2977 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2978 context->ustorm_st_context.common.max_sges_for_packet =
2979 ((context->ustorm_st_context.common.
2980 max_sges_for_packet + PAGES_PER_SGE - 1) &
2981 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07002982 }
2983
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08002984 context->ustorm_ag_context.cdu_usage =
2985 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2986 CDU_REGION_NUMBER_UCM_AG,
2987 ETH_CONNECTION_TYPE);
2988
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002989 context->xstorm_ag_context.cdu_reserved =
2990 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2991 CDU_REGION_NUMBER_XCM_AG,
2992 ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002993 }
Eilon Greensteinca003922009-08-12 22:53:28 -07002994
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00002995 /* Tx */
2996 for_each_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07002997 struct bnx2x_fastpath *fp = &bp->fp[i];
2998 struct eth_context *context =
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00002999 bnx2x_sp(bp, context[i].eth);
Eilon Greensteinca003922009-08-12 22:53:28 -07003000
3001 context->cstorm_st_context.sb_index_number =
3002 C_SB_ETH_TX_CQ_INDEX;
3003 context->cstorm_st_context.status_block_id = fp->sb_id;
3004
3005 context->xstorm_st_context.tx_bd_page_base_hi =
3006 U64_HI(fp->tx_desc_mapping);
3007 context->xstorm_st_context.tx_bd_page_base_lo =
3008 U64_LO(fp->tx_desc_mapping);
3009 context->xstorm_st_context.statistics_data = (fp->cl_id |
3010 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3011 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003012}
3013
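/*
 * Program the TSTORM RSS indirection table, spreading client ids over the
 * active queues round-robin (no-op when RSS is disabled).
 */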
3014static void bnx2x_init_ind_table(struct bnx2x *bp)
3015{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003016 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003017 int i;
3018
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003019 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003020 return;
3021
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003022 DP(NETIF_MSG_IFUP,
3023 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003024 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003025 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003026 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003027 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003028}
3029
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003030void bnx2x_set_client_config(struct bnx2x *bp)
Eliezer Tamir49d66772008-02-28 11:53:13 -08003031{
Eliezer Tamir49d66772008-02-28 11:53:13 -08003032 struct tstorm_eth_client_config tstorm_client = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003033 int port = BP_PORT(bp);
3034 int i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08003035
Eilon Greensteine7799c52009-01-14 21:30:27 -08003036 tstorm_client.mtu = bp->dev->mtu;
Eliezer Tamir49d66772008-02-28 11:53:13 -08003037 tstorm_client.config_flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003038 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3039 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
Eliezer Tamir49d66772008-02-28 11:53:13 -08003040#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08003041 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
Eliezer Tamir49d66772008-02-28 11:53:13 -08003042 tstorm_client.config_flags |=
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003043 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
Eliezer Tamir49d66772008-02-28 11:53:13 -08003044 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3045 }
3046#endif
Eliezer Tamir49d66772008-02-28 11:53:13 -08003047
3048 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00003049 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3050
Eliezer Tamir49d66772008-02-28 11:53:13 -08003051 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003052 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
Eliezer Tamir49d66772008-02-28 11:53:13 -08003053 ((u32 *)&tstorm_client)[0]);
3054 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003055 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
Eliezer Tamir49d66772008-02-28 11:53:13 -08003056 ((u32 *)&tstorm_client)[1]);
3057 }
3058
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003059 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3060 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
Eliezer Tamir49d66772008-02-28 11:53:13 -08003061}
3062
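/*
 * Translate the driver rx_mode into TSTORM MAC filter drop/accept masks and
 * the NIG LLH mask, write them to internal memory and, unless Rx is fully
 * disabled, push the per-client configuration as well.
 */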
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003063void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003064{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003065 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003066 int mode = bp->rx_mode;
Michael Chan37b091b2009-10-10 13:46:55 +00003067 int mask = bp->rx_mode_cl_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003068 int func = BP_FUNC(bp);
Eilon Greenstein581ce432009-07-29 00:20:04 +00003069 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003070 int i;
Eilon Greenstein581ce432009-07-29 00:20:04 +00003071 /* All but management unicast packets should pass to the host as well */
3072 u32 llh_mask =
3073 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3074 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3075 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3076 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003077
Eilon Greenstein3196a882008-08-13 15:58:49 -07003078 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003079
3080 switch (mode) {
3081 case BNX2X_RX_MODE_NONE: /* no Rx */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003082 tstorm_mac_filter.ucast_drop_all = mask;
3083 tstorm_mac_filter.mcast_drop_all = mask;
3084 tstorm_mac_filter.bcast_drop_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003085 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003086
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003087 case BNX2X_RX_MODE_NORMAL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003088 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003089 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003090
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003091 case BNX2X_RX_MODE_ALLMULTI:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003092 tstorm_mac_filter.mcast_accept_all = mask;
3093 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003094 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003095
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003096 case BNX2X_RX_MODE_PROMISC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003097 tstorm_mac_filter.ucast_accept_all = mask;
3098 tstorm_mac_filter.mcast_accept_all = mask;
3099 tstorm_mac_filter.bcast_accept_all = mask;
Eilon Greenstein581ce432009-07-29 00:20:04 +00003100 /* pass management unicast packets as well */
3101 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003102 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003103
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003104 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003105 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3106 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003107 }
3108
Eilon Greenstein581ce432009-07-29 00:20:04 +00003109 REG_WR(bp,
3110 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3111 llh_mask);
3112
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003113 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3114 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003115 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003116 ((u32 *)&tstorm_mac_filter)[i]);
3117
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003118/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003119 ((u32 *)&tstorm_mac_filter)[i]); */
3120 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003121
Eliezer Tamir49d66772008-02-28 11:53:13 -08003122 if (mode != BNX2X_RX_MODE_NONE)
3123 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003124}
3125
Eilon Greenstein471de712008-08-13 15:49:35 -07003126static void bnx2x_init_internal_common(struct bnx2x *bp)
3127{
3128 int i;
3129
3130 /* Zero this manually as its initialization is
3131 currently missing in the initTool */
3132 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3133 REG_WR(bp, BAR_USTRORM_INTMEM +
3134 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3135}
3136
3137static void bnx2x_init_internal_port(struct bnx2x *bp)
3138{
3139 int port = BP_PORT(bp);
3140
Eilon Greensteinca003922009-08-12 22:53:28 -07003141 REG_WR(bp,
3142 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3143 REG_WR(bp,
3144 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
Eilon Greenstein471de712008-08-13 15:49:35 -07003145 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3146 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3147}
3148
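/*
 * Per-function internal RAM init: RSS/TPA configuration, storm rx mode,
 * per-client statistics reset, statistics DMA addresses, E1H multi-function
 * settings, CQE page bases and aggregation sizes, dropless flow control
 * thresholds and the rate shaping/fairness (cmng) context.
 */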
3149static void bnx2x_init_internal_func(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003150{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003151 struct tstorm_eth_function_common_config tstorm_config = {0};
3152 struct stats_indication_flags stats_flags = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003153 int port = BP_PORT(bp);
3154 int func = BP_FUNC(bp);
Eilon Greensteinde832a52009-02-12 08:36:33 +00003155 int i, j;
3156 u32 offset;
Eilon Greenstein471de712008-08-13 15:49:35 -07003157 u16 max_agg_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003158
Tom Herbertc68ed252010-04-23 00:10:52 -07003159 tstorm_config.config_flags = RSS_FLAGS(bp);
3160
3161 if (is_multi(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003162 tstorm_config.rss_result_mask = MULTI_MASK;
Eilon Greensteinca003922009-08-12 22:53:28 -07003163
3164 /* Enable TPA if needed */
3165 if (bp->flags & TPA_ENABLE_FLAG)
3166 tstorm_config.config_flags |=
3167 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3168
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003169 if (IS_E1HMF(bp))
3170 tstorm_config.config_flags |=
3171 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003172
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003173 tstorm_config.leading_client_id = BP_L_ID(bp);
3174
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003175 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003176 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003177 (*(u32 *)&tstorm_config));
3178
Eliezer Tamirc14423f2008-02-28 11:49:42 -08003179 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
Michael Chan37b091b2009-10-10 13:46:55 +00003180 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003181 bnx2x_set_storm_rx_mode(bp);
3182
Eilon Greensteinde832a52009-02-12 08:36:33 +00003183 for_each_queue(bp, i) {
3184 u8 cl_id = bp->fp[i].cl_id;
3185
3186 /* reset xstorm per client statistics */
3187 offset = BAR_XSTRORM_INTMEM +
3188 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3189 for (j = 0;
3190 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3191 REG_WR(bp, offset + j*4, 0);
3192
3193 /* reset tstorm per client statistics */
3194 offset = BAR_TSTRORM_INTMEM +
3195 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3196 for (j = 0;
3197 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3198 REG_WR(bp, offset + j*4, 0);
3199
3200 /* reset ustorm per client statistics */
3201 offset = BAR_USTRORM_INTMEM +
3202 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3203 for (j = 0;
3204 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3205 REG_WR(bp, offset + j*4, 0);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003206 }
3207
3208 /* Init statistics related context */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003209 stats_flags.collect_eth = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003210
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003211 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003212 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003213 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003214 ((u32 *)&stats_flags)[1]);
3215
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003216 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003217 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003218 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003219 ((u32 *)&stats_flags)[1]);
3220
Eilon Greensteinde832a52009-02-12 08:36:33 +00003221 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3222 ((u32 *)&stats_flags)[0]);
3223 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3224 ((u32 *)&stats_flags)[1]);
3225
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003226 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003227 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003228 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003229 ((u32 *)&stats_flags)[1]);
3230
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003231 REG_WR(bp, BAR_XSTRORM_INTMEM +
3232 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3233 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3234 REG_WR(bp, BAR_XSTRORM_INTMEM +
3235 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3236 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3237
3238 REG_WR(bp, BAR_TSTRORM_INTMEM +
3239 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3240 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3241 REG_WR(bp, BAR_TSTRORM_INTMEM +
3242 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3243 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003244
Eilon Greensteinde832a52009-02-12 08:36:33 +00003245 REG_WR(bp, BAR_USTRORM_INTMEM +
3246 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3247 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3248 REG_WR(bp, BAR_USTRORM_INTMEM +
3249 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3250 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3251
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003252 if (CHIP_IS_E1H(bp)) {
3253 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3254 IS_E1HMF(bp));
3255 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3256 IS_E1HMF(bp));
3257 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3258 IS_E1HMF(bp));
3259 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3260 IS_E1HMF(bp));
3261
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003262 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3263 bp->e1hov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003264 }
3265
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08003266 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003267 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3268 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003269 for_each_queue(bp, i) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003270 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003271
3272 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00003273 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003274 U64_LO(fp->rx_comp_mapping));
3275 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00003276 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003277 U64_HI(fp->rx_comp_mapping));
3278
Eilon Greensteinca003922009-08-12 22:53:28 -07003279 /* Next page */
3280 REG_WR(bp, BAR_USTRORM_INTMEM +
3281 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3282 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3283 REG_WR(bp, BAR_USTRORM_INTMEM +
3284 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3285 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3286
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003287 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00003288 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003289 max_agg_size);
3290 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003291
Eilon Greenstein1c063282009-02-12 08:36:43 +00003292 /* dropless flow control */
3293 if (CHIP_IS_E1H(bp)) {
3294 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3295
3296 rx_pause.bd_thr_low = 250;
3297 rx_pause.cqe_thr_low = 250;
3298 rx_pause.cos = 1;
3299 rx_pause.sge_thr_low = 0;
3300 rx_pause.bd_thr_high = 350;
3301 rx_pause.cqe_thr_high = 350;
3302 rx_pause.sge_thr_high = 0;
3303
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003304 for_each_queue(bp, i) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00003305 struct bnx2x_fastpath *fp = &bp->fp[i];
3306
3307 if (!fp->disable_tpa) {
3308 rx_pause.sge_thr_low = 150;
3309 rx_pause.sge_thr_high = 250;
3310 }
3311
3312
3313 offset = BAR_USTRORM_INTMEM +
3314 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3315 fp->cl_id);
3316 for (j = 0;
3317 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3318 j++)
3319 REG_WR(bp, offset + j*4,
3320 ((u32 *)&rx_pause)[j]);
3321 }
3322 }
3323
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003324 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3325
3326 /* Init rate shaping and fairness contexts */
3327 if (IS_E1HMF(bp)) {
3328 int vn;
3329
3330		/* During init there is no active link.
3331		   Until the link is up, set the link rate to 10Gbps */
3332 bp->link_vars.line_speed = SPEED_10000;
3333 bnx2x_init_port_minmax(bp);
3334
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07003335 if (!BP_NOMCP(bp))
3336 bp->mf_config =
3337 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003338 bnx2x_calc_vn_weight_sum(bp);
3339
3340 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3341 bnx2x_init_vn_minmax(bp, 2*vn + port);
3342
3343 /* Enable rate shaping and fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07003344 bp->cmng.flags.cmng_enables |=
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003345 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07003346
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003347 } else {
3348 /* rate shaping and fairness are disabled */
3349 DP(NETIF_MSG_IFUP,
3350 "single function mode minmax will be disabled\n");
3351 }
3352
3353
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003354 /* Store cmng structures to internal memory */
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00003355 if (bp->port.pmf)
3356 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3357 REG_WR(bp, BAR_XSTRORM_INTMEM +
3358 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3359 ((u32 *)(&bp->cmng))[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003360}
3361
Eilon Greenstein471de712008-08-13 15:49:35 -07003362static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3363{
3364 switch (load_code) {
3365 case FW_MSG_CODE_DRV_LOAD_COMMON:
3366 bnx2x_init_internal_common(bp);
3367 /* no break */
3368
3369 case FW_MSG_CODE_DRV_LOAD_PORT:
3370 bnx2x_init_internal_port(bp);
3371 /* no break */
3372
3373 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3374 bnx2x_init_internal_func(bp);
3375 break;
3376
3377 default:
3378 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3379 break;
3380 }
3381}
3382
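/*
 * Top-level NIC init: set up the per-queue and default status blocks,
 * coalescing, the Rx/Tx/SPQ rings, contexts and internal memories, the
 * indirection table and statistics, then enable interrupts and check the
 * SPIO5 (fan failure) attention.
 */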
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003383void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003384{
3385 int i;
3386
3387 for_each_queue(bp, i) {
3388 struct bnx2x_fastpath *fp = &bp->fp[i];
3389
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003390 fp->bp = bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003391 fp->state = BNX2X_FP_STATE_CLOSED;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003392 fp->index = i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003393 fp->cl_id = BP_L_ID(bp) + i;
Michael Chan37b091b2009-10-10 13:46:55 +00003394#ifdef BCM_CNIC
3395 fp->sb_id = fp->cl_id + 1;
3396#else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003397 fp->sb_id = fp->cl_id;
Michael Chan37b091b2009-10-10 13:46:55 +00003398#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003399 DP(NETIF_MSG_IFUP,
Eilon Greensteinf5372252009-02-12 08:38:30 +00003400 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3401 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003402 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
Eilon Greenstein0626b892009-02-12 08:38:14 +00003403 fp->sb_id);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003404 bnx2x_update_fpsb_idx(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003405 }
3406
Eilon Greenstein16119782009-03-02 07:59:27 +00003407 /* ensure status block indices were read */
3408 rmb();
3409
3410
Eilon Greenstein5c862842008-08-13 15:51:48 -07003411 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3412 DEF_SB_ID);
3413 bnx2x_update_dsb_idx(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003414 bnx2x_update_coalesce(bp);
3415 bnx2x_init_rx_rings(bp);
3416 bnx2x_init_tx_ring(bp);
3417 bnx2x_init_sp_ring(bp);
3418 bnx2x_init_context(bp);
Eilon Greenstein471de712008-08-13 15:49:35 -07003419 bnx2x_init_internal(bp, load_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003420 bnx2x_init_ind_table(bp);
Eilon Greenstein0ef00452009-01-14 21:31:08 -08003421 bnx2x_stats_init(bp);
3422
3423 /* At this point, we are ready for interrupts */
3424 atomic_set(&bp->intr_sem, 0);
3425
3426 /* flush all before enabling interrupts */
3427 mb();
3428 mmiowb();
3429
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08003430 bnx2x_int_enable(bp);
Eilon Greensteineb8da202009-07-21 05:47:30 +00003431
3432 /* Check for SPIO5 */
3433 bnx2x_attn_int_deasserted0(bp,
3434 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3435 AEU_INPUTS_ATTN_BITS_SPIO5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003436}
3437
3438/* end of nic init */
3439
3440/*
3441 * gzip service functions
3442 */
3443
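/* Allocate the DMA buffer and zlib stream used for firmware decompression */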
3444static int bnx2x_gunzip_init(struct bnx2x *bp)
3445{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00003446 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3447 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003448 if (bp->gunzip_buf == NULL)
3449 goto gunzip_nomem1;
3450
3451 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3452 if (bp->strm == NULL)
3453 goto gunzip_nomem2;
3454
3455 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3456 GFP_KERNEL);
3457 if (bp->strm->workspace == NULL)
3458 goto gunzip_nomem3;
3459
3460 return 0;
3461
3462gunzip_nomem3:
3463 kfree(bp->strm);
3464 bp->strm = NULL;
3465
3466gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00003467 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3468 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003469 bp->gunzip_buf = NULL;
3470
3471gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003472 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3473		   " decompression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003474 return -ENOMEM;
3475}
3476
3477static void bnx2x_gunzip_end(struct bnx2x *bp)
3478{
3479 kfree(bp->strm->workspace);
3480
3481 kfree(bp->strm);
3482 bp->strm = NULL;
3483
3484 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00003485 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003487 bp->gunzip_buf = NULL;
3488 }
3489}
3490
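/*
 * Inflate a gzip-compressed firmware blob into bp->gunzip_buf: validate the
 * gzip header, skip an optional embedded file name, run zlib_inflate() and
 * report the output length (in 32-bit words) via bp->gunzip_outlen.
 */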
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003491static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003492{
3493 int n, rc;
3494
3495 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003496 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3497 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003498 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003499 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003500
3501 n = 10;
3502
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003503#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003504
3505 if (zbuf[3] & FNAME)
3506 while ((zbuf[n++] != 0) && (n < len));
3507
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003508 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003509 bp->strm->avail_in = len - n;
3510 bp->strm->next_out = bp->gunzip_buf;
3511 bp->strm->avail_out = FW_BUF_SIZE;
3512
3513 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3514 if (rc != Z_OK)
3515 return rc;
3516
3517 rc = zlib_inflate(bp->strm, Z_FINISH);
3518 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00003519 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3520 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003521
3522 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3523 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003524 netdev_err(bp->dev, "Firmware decompression error:"
3525 " gunzip_outlen (%d) not aligned\n",
3526 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003527 bp->gunzip_outlen >>= 2;
3528
3529 zlib_inflateEnd(bp->strm);
3530
3531 if (rc == Z_STREAM_END)
3532 return 0;
3533
3534 return rc;
3535}
3536
3537/* nic load/unload */
3538
3539/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003540 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003541 */
3542
3543/* send a NIG loopback debug packet */
3544static void bnx2x_lb_pckt(struct bnx2x *bp)
3545{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003546 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003547
3548 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003549 wb_write[0] = 0x55555555;
3550 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003551 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003552 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003553
3554 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003555 wb_write[0] = 0x09000000;
3556 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003557 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003558 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003559}
3560
3561/* Some of the internal memories are not directly readable from the
3562 * driver; to test them we send debug packets and check that the NIG
3563 * and parser packet counters advance as expected.
3564 */
3565static int bnx2x_int_mem_test(struct bnx2x *bp)
3566{
3567 int factor;
3568 int count, i;
3569 u32 val = 0;
3570
Eilon Greensteinad8d3942008-06-23 20:29:02 -07003571 if (CHIP_REV_IS_FPGA(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003572 factor = 120;
Eilon Greensteinad8d3942008-06-23 20:29:02 -07003573 else if (CHIP_REV_IS_EMUL(bp))
3574 factor = 200;
3575 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003576 factor = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003577
3578 DP(NETIF_MSG_HW, "start part1\n");
3579
3580 /* Disable inputs of parser neighbor blocks */
3581 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3582 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3583 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07003584 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003585
3586 /* Write 0 to parser credits for CFC search request */
3587 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3588
3589 /* send Ethernet packet */
3590 bnx2x_lb_pckt(bp);
3591
3592	/* TODO: do we need to reset the NIG statistics here? */
3593 /* Wait until NIG register shows 1 packet of size 0x10 */
3594 count = 1000 * factor;
3595 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003596
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003597 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3598 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003599 if (val == 0x10)
3600 break;
3601
3602 msleep(10);
3603 count--;
3604 }
3605 if (val != 0x10) {
3606 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3607 return -1;
3608 }
3609
3610 /* Wait until PRS register shows 1 packet */
3611 count = 1000 * factor;
3612 while (count) {
3613 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003614 if (val == 1)
3615 break;
3616
3617 msleep(10);
3618 count--;
3619 }
3620 if (val != 0x1) {
3621 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3622 return -2;
3623 }
3624
3625 /* Reset and init BRB, PRS */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003626 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003627 msleep(50);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003628 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003629 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003630 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3631 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003632
3633 DP(NETIF_MSG_HW, "part2\n");
3634
3635 /* Disable inputs of parser neighbor blocks */
3636 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3637 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3638 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07003639 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003640
3641 /* Write 0 to parser credits for CFC search request */
3642 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3643
3644 /* send 10 Ethernet packets */
3645 for (i = 0; i < 10; i++)
3646 bnx2x_lb_pckt(bp);
3647
3648 /* Wait until NIG register shows 10 + 1
3649 packets of size 11*0x10 = 0xb0 */
3650 count = 1000 * factor;
3651 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003652
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003653 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3654 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003655 if (val == 0xb0)
3656 break;
3657
3658 msleep(10);
3659 count--;
3660 }
3661 if (val != 0xb0) {
3662 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3663 return -3;
3664 }
3665
3666 /* Wait until PRS register shows 2 packets */
3667 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3668 if (val != 2)
3669 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3670
3671 /* Write 1 to parser credits for CFC search request */
3672 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3673
3674 /* Wait until PRS register shows 3 packets */
3675 msleep(10 * factor);
3676 /* Wait until NIG register shows 1 packet of size 0x10 */
3677 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3678 if (val != 3)
3679 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3680
3681 /* clear NIG EOP FIFO */
3682 for (i = 0; i < 11; i++)
3683 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3684 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3685 if (val != 1) {
3686 BNX2X_ERR("clear of NIG failed\n");
3687 return -4;
3688 }
3689
3690 /* Reset and init BRB, PRS, NIG */
3691 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3692 msleep(50);
3693 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3694 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003695 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3696 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00003697#ifndef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003698 /* set NIC mode */
3699 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3700#endif
3701
3702 /* Enable inputs of parser neighbor blocks */
3703 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3704 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3705 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
Eilon Greenstein3196a882008-08-13 15:58:49 -07003706 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003707
3708 DP(NETIF_MSG_HW, "done\n");
3709
3710 return 0; /* OK */
3711}
3712
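/*
 * Enable attentions from the HW blocks by clearing their interrupt mask
 * registers; a few sources (e.g. PBF bits 3-4 and some PXP2 bits) are
 * deliberately left masked.
 */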
3713static void enable_blocks_attention(struct bnx2x *bp)
3714{
3715 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3716 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3717 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3718 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3719 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3720 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3721 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3722 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3723 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003724/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3725/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003726 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3727 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3728 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003729/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3730/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003731 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3732 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3733 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3734 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003735/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3736/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3737 if (CHIP_REV_IS_FPGA(bp))
3738 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3739 else
3740 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003741 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3742 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3743 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003744/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3745/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003746 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3747 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003748/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3749 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003750}
3751
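/*
 * Per-block parity mask values written by enable_blocks_parity():
 * 0xffffffff leaves all parity sources of a block masked, 0x0 unmasks them
 * all, other values keep only the noted bits masked.
 */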
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003752static const struct {
3753 u32 addr;
3754 u32 mask;
3755} bnx2x_parity_mask[] = {
3756 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3757 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3758 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3759 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3760 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3761 {QM_REG_QM_PRTY_MASK, 0x0},
3762 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3763 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3764 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3765 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3766 {CDU_REG_CDU_PRTY_MASK, 0x0},
3767 {CFC_REG_CFC_PRTY_MASK, 0x0},
3768 {DBG_REG_DBG_PRTY_MASK, 0x0},
3769 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3770 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3771 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3772 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3773 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3774 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3775 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3776 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3777 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3778 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3779 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3780 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3781 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3782 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3783 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3784};
3785
3786static void enable_blocks_parity(struct bnx2x *bp)
3787{
3788 int i, mask_arr_len =
3789 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3790
3791 for (i = 0; i < mask_arr_len; i++)
3792 REG_WR(bp, bnx2x_parity_mask[i].addr,
3793 bnx2x_parity_mask[i].mask);
3794}
3795
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003796
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00003797static void bnx2x_reset_common(struct bnx2x *bp)
3798{
3799 /* reset_common */
3800 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3801 0xd3ffff7f);
3802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3803}
3804
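/*
 * Derive the PXP read/write ordering from the PCIe Device Control register:
 * the write order comes from the Max_Payload_Size field and the read order
 * from Max_Read_Request_Size, unless it is forced through bp->mrrs; the PXP
 * arbiter is then programmed accordingly.
 */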
Eilon Greenstein573f2032009-08-12 08:24:14 +00003805static void bnx2x_init_pxp(struct bnx2x *bp)
3806{
3807 u16 devctl;
3808 int r_order, w_order;
3809
3810 pci_read_config_word(bp->pdev,
3811 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3812 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3813 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3814 if (bp->mrrs == -1)
3815 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3816 else {
3817 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3818 r_order = bp->mrrs;
3819 }
3820
3821 bnx2x_init_pxp_arb(bp, r_order, w_order);
3822}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003823
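/*
 * Fan-failure detection setup: depending on the shared HW configuration the
 * mechanism is either always required or required only for certain PHY types
 * (queried per port below).  When required, fan failure is signalled on
 * SPIO 5, which is configured as an input, switched to active-low mode and
 * routed to the IGU as an attention event.
 */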
3824static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3825{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00003826 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003827 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00003828 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003829
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00003830 if (BP_NOMCP(bp))
3831 return;
3832
3833 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003834 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3835 SHARED_HW_CFG_FAN_FAILURE_MASK;
3836
3837 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3838 is_required = 1;
3839
3840 /*
3841 * The fan failure mechanism is usually related to the PHY type since
3842 * the power consumption of the board is affected by the PHY. Currently,
3843 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3844 */
3845 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3846 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003847 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00003848 bnx2x_fan_failure_det_req(
3849 bp,
3850 bp->common.shmem_base,
3851 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003852 }
3853
3854 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3855
3856 if (is_required == 0)
3857 return;
3858
3859 /* Fan failure is indicated by SPIO 5 */
3860 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3861 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3862
3863 /* set to active low mode */
3864 val = REG_RD(bp, MISC_REG_SPIO_INT);
3865 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003866 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00003867 REG_WR(bp, MISC_REG_SPIO_INT, val);
3868
3869 /* enable interrupt to signal the IGU */
3870 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3871 val |= (1 << MISC_REGISTERS_SPIO_5);
3872 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3873}
3874
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003875static int bnx2x_init_common(struct bnx2x *bp)
3876{
3877 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00003878#ifdef BCM_CNIC
3879 u32 wb_write[2];
3880#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003881
3882 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3883
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00003884 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003885 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3886 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3887
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003888 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003889 if (CHIP_IS_E1H(bp))
3890 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3891
3892 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3893 msleep(30);
3894 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3895
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003896 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003897 if (CHIP_IS_E1(bp)) {
3898 /* enable HW interrupt from PXP on USDM overflow
3899 bit 16 on INT_MASK_0 */
3900 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003901 }
3902
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003903 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003904 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003905
3906#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003907 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3908 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3909 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3910 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3911 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00003912 /* make sure this value is 0 */
3913 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003914
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003915/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3916 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3917 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3918 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3919 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003920#endif
3921
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003922 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00003923#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003924 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3925 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3926 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003927#endif
3928
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003929 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3930 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003931
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003932	/* let the HW do its magic ... */
3933 msleep(100);
3934 /* finish PXP init */
3935 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3936 if (val != 1) {
3937 BNX2X_ERR("PXP2 CFG failed\n");
3938 return -EBUSY;
3939 }
3940 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3941 if (val != 1) {
3942 BNX2X_ERR("PXP2 RD_INIT failed\n");
3943 return -EBUSY;
3944 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003945
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003946 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3947 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003948
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003949 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003950
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003951 /* clean the DMAE memory */
3952 bp->dmae_ready = 1;
3953 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003954
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003955 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3956 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3957 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3958 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003959
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003960 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3961 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3962 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3963 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3964
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003965 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00003966
3967#ifdef BCM_CNIC
3968 wb_write[0] = 0;
3969 wb_write[1] = 0;
3970 for (i = 0; i < 64; i++) {
3971 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3972 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3973
3974 if (CHIP_IS_E1H(bp)) {
3975 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
3976 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
3977 wb_write, 2);
3978 }
3979 }
3980#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003981 /* soft reset pulse */
3982 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3983 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003984
Michael Chan37b091b2009-10-10 13:46:55 +00003985#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003986 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003987#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003988
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003989 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003990 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
3991 if (!CHIP_REV_IS_SLOW(bp)) {
3992 /* enable hw interrupt from doorbell Q */
3993 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3994 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003995
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07003996 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3997 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003998 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00003999#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07004000 /* set NIC mode */
4001 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00004002#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004003 if (CHIP_IS_E1H(bp))
4004 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004005
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004006 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4007 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4008 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4009 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004010
Eilon Greensteinca003922009-08-12 22:53:28 -07004011 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4012 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4013 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4014 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004015
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004016 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4017 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4018 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4019 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004020
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004021 /* sync semi rtc */
4022 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4023 0x80000000);
4024 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4025 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004026
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004027 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4028 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4029 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004030
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004031 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07004032 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4033 REG_WR(bp, i, random32());
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004034 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004035#ifdef BCM_CNIC
4036 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4037 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4038 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4039 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4040 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4041 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4042 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4043 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4044 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4045 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4046#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004047 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004048
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004049 if (sizeof(union cdu_context) != 1024)
4050 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004051 dev_alert(&bp->pdev->dev, "please adjust the size "
4052 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00004053 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004054
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004055 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004056 val = (4 << 24) + (0 << 12) + 1024;
4057 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004059 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004060 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004061 /* enable context validation interrupt from CFC */
4062 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4063
4064 /* set the thresholds to prevent CFC/CDU race */
4065 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004066
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004067 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4068 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004069
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004070 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004071 /* Reset PCIE errors for debug */
4072 REG_WR(bp, 0x2814, 0xffffffff);
4073 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004074
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004075 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004076 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004077 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004078 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004079
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004080 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004081 if (CHIP_IS_E1H(bp)) {
4082 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4083 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4084 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004085
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004086 if (CHIP_REV_IS_SLOW(bp))
4087 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004088
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004089 /* finish CFC init */
4090 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4091 if (val != 1) {
4092 BNX2X_ERR("CFC LL_INIT failed\n");
4093 return -EBUSY;
4094 }
4095 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4096 if (val != 1) {
4097 BNX2X_ERR("CFC AC_INIT failed\n");
4098 return -EBUSY;
4099 }
4100 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4101 if (val != 1) {
4102 BNX2X_ERR("CFC CAM_INIT failed\n");
4103 return -EBUSY;
4104 }
4105 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004106
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004107 /* read NIG statistic
4108 to see if this is our first up since powerup */
4109 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4110 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004111
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004112 /* do internal memory self test */
4113 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4114 BNX2X_ERR("internal mem self test failed\n");
4115 return -EBUSY;
4116 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004117
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004118 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4119 bp->common.shmem_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08004120
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004121 bnx2x_setup_fan_failure_detection(bp);
4122
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004123 /* clear PXP2 attentions */
4124 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004125
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004126 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004127 if (CHIP_PARITY_SUPPORTED(bp))
4128 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004129
Yaniv Rosner6bbca912008-08-13 15:57:28 -07004130 if (!BP_NOMCP(bp)) {
4131 bnx2x_acquire_phy_lock(bp);
4132 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4133 bnx2x_release_phy_lock(bp);
4134 } else
4135 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4136
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004137 return 0;
4138}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004139
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004140static int bnx2x_init_port(struct bnx2x *bp)
4141{
4142 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004143 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00004144 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004145 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004146
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004147 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004148
4149 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004150
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004151 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004152 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07004153
4154 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4155 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4156 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004157 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004158
Michael Chan37b091b2009-10-10 13:46:55 +00004159#ifdef BCM_CNIC
4160 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004161
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004162 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00004163 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4164 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004165#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004166
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004167 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00004168
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004169 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00004170 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4171 /* no pause for emulation and FPGA */
4172 low = 0;
4173 high = 513;
4174 } else {
4175 if (IS_E1HMF(bp))
4176 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4177 else if (bp->dev->mtu > 4096) {
4178 if (bp->flags & ONE_PORT_FLAG)
4179 low = 160;
4180 else {
4181 val = bp->dev->mtu;
4182 /* (24*1024 + val*4)/256 */
4183 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4184 }
4185 } else
4186 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4187 high = low + 56; /* 14*1024/256 */
4188 }
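	/*
	 * Example (hypothetical configuration, not from the original source):
	 * on a two-port board in SF mode with an MTU of 9000 the code above
	 * yields low = 96 + 140 + 1 = 237 and high = 237 + 56 = 293, i.e.
	 * roughly (24KB + 4*MTU)/256 plus an extra 14KB/256; the thresholds
	 * are apparently expressed in 256-byte units.
	 */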
4189 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4190 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4191
4192
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004193 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07004194
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004195 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004196 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004197 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004198 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00004199
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004200 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4201 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4202 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4203 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00004204
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004205 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004206 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004207
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004208 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004209
4210 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004211 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004212
4213 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004214 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004215 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004216 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004217
4218 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004219 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004220 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004221 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004222
Michael Chan37b091b2009-10-10 13:46:55 +00004223#ifdef BCM_CNIC
4224 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004225#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004226 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004227 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004228
4229 if (CHIP_IS_E1(bp)) {
4230 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4231 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4232 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004233 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004234
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004235 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004236 /* init aeu_mask_attn_func_0/1:
4237 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4238 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4239 * bits 4-7 are used for "per vn group attention" */
4240 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4241 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4242
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004243 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004244 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004245 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004246 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004247 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00004248
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004249 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004250
4251 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4252
4253 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004254 /* 0x2 disable e1hov, 0x1 enable */
4255 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4256 (IS_E1HMF(bp) ? 0x1 : 0x2));
4257
Eilon Greenstein1c063282009-02-12 08:36:43 +00004258 {
4259 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4260 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4261 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4262 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004263 }
4264
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004265 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004266 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004267 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4268 bp->common.shmem_base);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004269
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004270 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4271 port)) {
Eilon Greenstein4d295db2009-07-21 05:47:47 +00004272 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4273 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4274 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08004275 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00004276 REG_WR(bp, reg_addr, val);
Eliezer Tamirf1410642008-02-28 11:51:50 -08004277 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07004278 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004279
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004280 return 0;
4281}
4282
4283#define ILT_PER_FUNC (768/2)
4284#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4285/* the physical address is shifted right 12 bits and has a 1 (valid bit)
4286 added as the 53rd bit,
4287 then since this is a wide register(TM)
4288 we split it into two 32-bit writes
4289 */
4290#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4291#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4292#define PXP_ONE_ILT(x) (((x) << 10) | x)
4293#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4294
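/*
 * Worked example (hypothetical address, for illustration only): for
 * addr = 0x0000001234567000ULL, ONCHIP_ADDR1(addr) == 0x01234567 (address
 * bits 43..12) and ONCHIP_ADDR2(addr) == 0x00100000 (just the valid bit,
 * since address bits 63..44 are zero here); bnx2x_ilt_wr() below writes
 * these as the two 32-bit halves of the wide ILT register.
 */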
Michael Chan37b091b2009-10-10 13:46:55 +00004295#ifdef BCM_CNIC
4296#define CNIC_ILT_LINES 127
4297#define CNIC_CTX_PER_ILT 16
4298#else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004299#define CNIC_ILT_LINES 0
Michael Chan37b091b2009-10-10 13:46:55 +00004300#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004301
4302static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4303{
4304 int reg;
4305
4306 if (CHIP_IS_E1H(bp))
4307 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4308 else /* E1 */
4309 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4310
4311 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4312}
4313
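/*
 * Per-function init: enable the HC's MSI attention/reconfigure capability,
 * program this function's ILT lines (the CDU context line and, under
 * BCM_CNIC, the timers/QM/searcher lines plus the T2 table location), and
 * on E1H run the per-function SEM/CM block init and NIG/HC setup before
 * clearing the PCIe error registers and probing the PHY.
 */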
4314static int bnx2x_init_func(struct bnx2x *bp)
4315{
4316 int port = BP_PORT(bp);
4317 int func = BP_FUNC(bp);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004318 u32 addr, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004319 int i;
4320
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004321 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004322
Eilon Greenstein8badd272009-02-12 08:36:15 +00004323 /* set MSI reconfigure capability */
4324 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4325 val = REG_RD(bp, addr);
4326 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4327 REG_WR(bp, addr, val);
4328
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004329 i = FUNC_ILT_BASE(func);
4330
4331 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4332 if (CHIP_IS_E1H(bp)) {
4333 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4334 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4335 } else /* E1 */
4336 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4337 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4338
Michael Chan37b091b2009-10-10 13:46:55 +00004339#ifdef BCM_CNIC
4340 i += 1 + CNIC_ILT_LINES;
4341 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4342 if (CHIP_IS_E1(bp))
4343 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4344 else {
4345 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4346 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4347 }
4348
4349 i++;
4350 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4351 if (CHIP_IS_E1(bp))
4352 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4353 else {
4354 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4355 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4356 }
4357
4358 i++;
4359 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4360 if (CHIP_IS_E1(bp))
4361 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4362 else {
4363 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4364 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4365 }
4366
4367 /* tell the searcher where the T2 table is */
4368 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4369
4370 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4371 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4372
4373 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4374 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4375 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4376
4377 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4378#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004379
4380 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein573f2032009-08-12 08:24:14 +00004381 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4382 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4383 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4384 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4385 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4386 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4387 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4388 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4389 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004390
4391 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4392 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4393 }
4394
4395 /* HC init per function */
4396 if (CHIP_IS_E1H(bp)) {
4397 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4398
4399 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4400 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4401 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004402 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004403
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004404 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004405 REG_WR(bp, 0x2114, 0xffffffff);
4406 REG_WR(bp, 0x2120, 0xffffffff);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00004407 bnx2x_phy_probe(&bp->link_params);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004408 return 0;
4409}
4410
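/*
 * HW init entry point.  The MCP load_code tells this driver instance which
 * stages it owns: DRV_LOAD_COMMON runs the one-time common init and then
 * falls through to the port and function stages, DRV_LOAD_PORT runs port +
 * function init, and DRV_LOAD_FUNCTION runs only the per-function init
 * (the fall-through is deliberate, see the "no break" notes).
 */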
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004411int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004412{
4413 int i, rc = 0;
4414
4415 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4416 BP_FUNC(bp), load_code);
4417
4418 bp->dmae_ready = 0;
4419 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00004420 rc = bnx2x_gunzip_init(bp);
4421 if (rc)
4422 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004423
4424 switch (load_code) {
4425 case FW_MSG_CODE_DRV_LOAD_COMMON:
4426 rc = bnx2x_init_common(bp);
4427 if (rc)
4428 goto init_hw_err;
4429 /* no break */
4430
4431 case FW_MSG_CODE_DRV_LOAD_PORT:
4432 bp->dmae_ready = 1;
4433 rc = bnx2x_init_port(bp);
4434 if (rc)
4435 goto init_hw_err;
4436 /* no break */
4437
4438 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4439 bp->dmae_ready = 1;
4440 rc = bnx2x_init_func(bp);
4441 if (rc)
4442 goto init_hw_err;
4443 break;
4444
4445 default:
4446 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4447 break;
4448 }
4449
4450 if (!BP_NOMCP(bp)) {
4451 int func = BP_FUNC(bp);
4452
4453 bp->fw_drv_pulse_wr_seq =
4454 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4455 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00004456 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4457 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004458
4459 /* this needs to be done before gunzip end */
4460 bnx2x_zero_def_sb(bp);
4461 for_each_queue(bp, i)
4462 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
Michael Chan37b091b2009-10-10 13:46:55 +00004463#ifdef BCM_CNIC
4464 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4465#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004466
4467init_hw_err:
4468 bnx2x_gunzip_end(bp);
4469
4470 return rc;
4471}
4472
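/*
 * Free everything bnx2x_alloc_mem() set up: per-queue status blocks, the
 * RX buffer/descriptor/completion/SGE rings and TX rings, the default
 * status block, the slowpath buffer, the CNIC searcher/timer/QM tables
 * (when BCM_CNIC is defined) and the slowpath queue.
 */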
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004473void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004474{
4475
4476#define BNX2X_PCI_FREE(x, y, size) \
4477 do { \
4478 if (x) { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004479 dma_free_coherent(&bp->pdev->dev, size, x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004480 x = NULL; \
4481 y = 0; \
4482 } \
4483 } while (0)
4484
4485#define BNX2X_FREE(x) \
4486 do { \
4487 if (x) { \
4488 vfree(x); \
4489 x = NULL; \
4490 } \
4491 } while (0)
4492
4493 int i;
4494
4495 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004496 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004497 for_each_queue(bp, i) {
4498
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004499 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004500 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4501 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07004502 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004503 }
4504 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004505 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004506
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004507 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004508 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4509 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4510 bnx2x_fp(bp, i, rx_desc_mapping),
4511 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4512
4513 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4514 bnx2x_fp(bp, i, rx_comp_mapping),
4515 sizeof(struct eth_fast_path_rx_cqe) *
4516 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004517
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004518 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07004519 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004520 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4521 bnx2x_fp(bp, i, rx_sge_mapping),
4522 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4523 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004524 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004525 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004526
4527 /* fastpath tx rings: tx_buf tx_desc */
4528 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4529 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4530 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07004531 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004532 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004533 /* end of fastpath */
4534
4535 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004536 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004537
4538 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004539 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004540
Michael Chan37b091b2009-10-10 13:46:55 +00004541#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004542 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4543 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4544 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4545 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00004546 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4547 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004548#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004549 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004550
4551#undef BNX2X_PCI_FREE
4552#undef BNX2X_FREE
4553}
4554
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004555int bnx2x_alloc_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004556{
4557
4558#define BNX2X_PCI_ALLOC(x, y, size) \
4559 do { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004560 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004561 if (x == NULL) \
4562 goto alloc_mem_err; \
4563 memset(x, 0, size); \
4564 } while (0)
4565
4566#define BNX2X_ALLOC(x, size) \
4567 do { \
4568 x = vmalloc(size); \
4569 if (x == NULL) \
4570 goto alloc_mem_err; \
4571 memset(x, 0, size); \
4572 } while (0)
4573
4574 int i;
4575
4576 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004577 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004578 for_each_queue(bp, i) {
4579 bnx2x_fp(bp, i, bp) = bp;
4580
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004581 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004582 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4583 &bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07004584 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004585 }
4586 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004587 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004588
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004589 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004590 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4591 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4592 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4593 &bnx2x_fp(bp, i, rx_desc_mapping),
4594 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4595
4596 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4597 &bnx2x_fp(bp, i, rx_comp_mapping),
4598 sizeof(struct eth_fast_path_rx_cqe) *
4599 NUM_RCQ_BD);
4600
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004601 /* SGE ring */
4602 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4603 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4604 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4605 &bnx2x_fp(bp, i, rx_sge_mapping),
4606 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004607 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004608 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004609 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004610
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004611 /* fastpath tx rings: tx_buf tx_desc */
4612 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4613 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4614 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4615 &bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07004616 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004617 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004618 /* end of fastpath */
4619
4620 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4621 sizeof(struct host_def_status_block));
4622
4623 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4624 sizeof(struct bnx2x_slowpath));
4625
Michael Chan37b091b2009-10-10 13:46:55 +00004626#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004627 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4628
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004629	/* allocate the searcher T2 table;
4630	 we allocate 1/4 of the T1 allocation for T2
4631	 (which is not entered into the ILT) */
4632 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4633
Michael Chan37b091b2009-10-10 13:46:55 +00004634 /* Initialize T2 (for 1024 connections) */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004635 for (i = 0; i < 16*1024; i += 64)
Michael Chan37b091b2009-10-10 13:46:55 +00004636 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004637
Michael Chan37b091b2009-10-10 13:46:55 +00004638 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004639 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4640
4641 /* QM queues (128*MAX_CONN) */
4642 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00004643
4644 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4645 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004646#endif
4647
4648 /* Slow path ring */
4649 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4650
4651 return 0;
4652
4653alloc_mem_err:
4654 bnx2x_free_mem(bp);
4655 return -ENOMEM;
4656
4657#undef BNX2X_PCI_ALLOC
4658#undef BNX2X_ALLOC
4659}
4660
Yitchak Gertner65abd742008-08-25 15:26:24 -07004661
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004662/*
4663 * Init service functions
4664 */
4665
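/*
 * MAC configuration helpers.  E1 and E1H use different CAM layouts and
 * MAC configuration ramrod formats, hence the two *_gen variants below;
 * callers bump set_mac_pending, post the SET_MAC ramrod through one of
 * them and then wait for the completion via bnx2x_wait_ramrod().
 */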
Michael Chane665bfd2009-10-10 13:46:54 +00004666/**
4667 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4668 *
4669 * @param bp driver descriptor
4670 * @param set set or clear an entry (1 or 0)
4671 * @param mac pointer to a buffer containing a MAC
4672 * @param cl_bit_vec bit vector of clients to register a MAC for
4673 * @param cam_offset offset in a CAM to use
4674 * @param with_bcast set broadcast MAC as well
4675 */
4676static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4677 u32 cl_bit_vec, u8 cam_offset,
4678 u8 with_bcast)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004679{
4680 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004681 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004682
4683 /* CAM allocation
4684 * unicasts 0-31:port0 32-63:port1
4685 * multicast 64-127:port0 128-191:port1
4686 */
Michael Chane665bfd2009-10-10 13:46:54 +00004687 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4688 config->hdr.offset = cam_offset;
4689 config->hdr.client_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004690 config->hdr.reserved1 = 0;
4691
4692 /* primary MAC */
4693 config->config_table[0].cam_entry.msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004694 swab16(*(u16 *)&mac[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004695 config->config_table[0].cam_entry.middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004696 swab16(*(u16 *)&mac[2]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004697 config->config_table[0].cam_entry.lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004698 swab16(*(u16 *)&mac[4]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004699 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004700 if (set)
4701 config->config_table[0].target_table_entry.flags = 0;
4702 else
4703 CAM_INVALIDATE(config->config_table[0]);
Eilon Greensteinca003922009-08-12 22:53:28 -07004704 config->config_table[0].target_table_entry.clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00004705 cpu_to_le32(cl_bit_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004706 config->config_table[0].target_table_entry.vlan_id = 0;
4707
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004708 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4709 (set ? "setting" : "clearing"),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004710 config->config_table[0].cam_entry.msb_mac_addr,
4711 config->config_table[0].cam_entry.middle_mac_addr,
4712 config->config_table[0].cam_entry.lsb_mac_addr);
4713
4714 /* broadcast */
Michael Chane665bfd2009-10-10 13:46:54 +00004715 if (with_bcast) {
4716 config->config_table[1].cam_entry.msb_mac_addr =
4717 cpu_to_le16(0xffff);
4718 config->config_table[1].cam_entry.middle_mac_addr =
4719 cpu_to_le16(0xffff);
4720 config->config_table[1].cam_entry.lsb_mac_addr =
4721 cpu_to_le16(0xffff);
4722 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4723 if (set)
4724 config->config_table[1].target_table_entry.flags =
4725 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4726 else
4727 CAM_INVALIDATE(config->config_table[1]);
4728 config->config_table[1].target_table_entry.clients_bit_vector =
4729 cpu_to_le32(cl_bit_vec);
4730 config->config_table[1].target_table_entry.vlan_id = 0;
4731 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004732
4733 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4734 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4735 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4736}
4737
Michael Chane665bfd2009-10-10 13:46:54 +00004738/**
4739 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4740 *
4741 * @param bp driver descriptor
4742 * @param set set or clear an entry (1 or 0)
4743 * @param mac pointer to a buffer containing a MAC
4744 * @param cl_bit_vec bit vector of clients to register a MAC for
4745 * @param cam_offset offset in a CAM to use
4746 */
4747static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4748 u32 cl_bit_vec, u8 cam_offset)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004749{
4750 struct mac_configuration_cmd_e1h *config =
4751 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4752
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004753 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00004754 config->hdr.offset = cam_offset;
4755 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004756 config->hdr.reserved1 = 0;
4757
4758 /* primary MAC */
4759 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004760 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004761 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004762 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004763 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00004764 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07004765 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00004766 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004767 config->config_table[0].vlan_id = 0;
4768 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004769 if (set)
4770 config->config_table[0].flags = BP_PORT(bp);
4771 else
4772 config->config_table[0].flags =
4773 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004774
Michael Chane665bfd2009-10-10 13:46:54 +00004775 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004776 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004777 config->config_table[0].msb_mac_addr,
4778 config->config_table[0].middle_mac_addr,
Michael Chane665bfd2009-10-10 13:46:54 +00004779 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004780
4781 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4784}
4785
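/*
 * Wait until *state_p reaches the requested state.  The state is updated
 * from the slowpath completion path (bnx2x_sp_event()); in polling mode the
 * RX completion rings are serviced here directly, presumably because
 * interrupts are not available at that point.
 */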
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004786static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4787 int *state_p, int poll)
4788{
4789 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00004790 int cnt = 5000;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004791
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004792 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4793 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004794
4795 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004796 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004797 if (poll) {
4798 bnx2x_rx_int(bp->fp, 10);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004799 /* if index is different from 0
4800 * the reply for some commands will
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004801 * be on the non default queue
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004802 */
4803 if (idx)
4804 bnx2x_rx_int(&bp->fp[idx], 10);
4805 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004806
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07004807 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00004808 if (*state_p == state) {
4809#ifdef BNX2X_STOP_ON_ERROR
4810 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4811#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004812 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00004813 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004814
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004815 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00004816
4817 if (bp->panic)
4818 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004819 }
4820
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004821 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08004822 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4823 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004824#ifdef BNX2X_STOP_ON_ERROR
4825 bnx2x_panic();
4826#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004827
Eliezer Tamir49d66772008-02-28 11:53:13 -08004828 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004829}
4830
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004831void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00004832{
4833 bp->set_mac_pending++;
4834 smp_wmb();
4835
4836 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4837 (1 << bp->fp->cl_id), BP_FUNC(bp));
4838
4839 /* Wait for a completion */
4840 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4841}
4842
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004843void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00004844{
4845 bp->set_mac_pending++;
4846 smp_wmb();
4847
4848 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4849 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4850 1);
4851
4852 /* Wait for a completion */
4853 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4854}
4855
Michael Chan993ac7b2009-10-10 13:46:56 +00004856#ifdef BCM_CNIC
4857/**
4858 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4859 * MAC(s). This function will wait until the ramrod completion
4860 * returns.
4861 *
4862 * @param bp driver handle
4863 * @param set set or clear the CAM entry
4864 *
4865 * @return 0 on success, -ENODEV if the ramrod doesn't return.
4866 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004867int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00004868{
4869 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4870
4871 bp->set_mac_pending++;
4872 smp_wmb();
4873
4874 /* Send a SET_MAC ramrod */
4875 if (CHIP_IS_E1(bp))
4876 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4877 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4878 1);
4879 else
4880 /* CAM allocation for E1H
4881 * unicasts: by func number
4882 * multicast: 20+FUNC*20, 20 each
4883 */
4884 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4885 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4886
4887 /* Wait for a completion when setting */
4888 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4889
4890 return 0;
4891}
4892#endif
4893
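/*
 * Client setup is ramrod driven: the leading client is brought up with a
 * PORT_SETUP ramrod tracked through bp->state, additional clients with
 * CLIENT_SETUP ramrods tracked through their fastpath state; both paths
 * first reset the IGU state for the client's status block and then wait
 * for the ramrod completion.
 */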
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004894int bnx2x_setup_leading(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004895{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004896 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004897
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004898 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004899 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004900
4901 /* SETUP ramrod */
4902 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4903
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004904 /* Wait for completion */
4905 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004906
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004907 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004908}
4909
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004910int bnx2x_setup_multi(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004911{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004912 struct bnx2x_fastpath *fp = &bp->fp[index];
4913
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004914 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004915 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004916
Eliezer Tamir228241e2008-02-28 11:56:57 -08004917 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004918 fp->state = BNX2X_FP_STATE_OPENING;
4919 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4920 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004921
4922 /* Wait for completion */
4923 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004924 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004925}
4926
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004927
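/*
 * Queue count selection for MSI-X/RSS: a single queue when RSS is disabled,
 * otherwise the num_queues parameter when it is non-zero, else one queue
 * per online CPU; both are capped at BNX2X_MAX_QUEUES(bp).
 */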
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004928void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004929{
Eilon Greensteinca003922009-08-12 22:53:28 -07004930
4931 switch (bp->multi_mode) {
4932 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004933 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07004934 break;
4935
4936 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004937 if (num_queues)
4938 bp->num_queues = min_t(u32, num_queues,
4939 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07004940 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004941 bp->num_queues = min_t(u32, num_online_cpus(),
4942 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07004943 break;
4944
4945
4946 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004947 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07004948 break;
4949 }
Eilon Greensteinca003922009-08-12 22:53:28 -07004950}
4951
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004952
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953
4954static int bnx2x_stop_multi(struct bnx2x *bp, int index)
4955{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004956 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004957 int rc;
4958
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004959 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004960 fp->state = BNX2X_FP_STATE_HALTING;
4961 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004962
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004963 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004964 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004965 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004966 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004967 return rc;
4968
4969 /* delete cfc entry */
4970 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
4971
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004972 /* Wait for completion */
4973 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004974 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004975 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004976}
4977
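/*
 * Stop the leading (index 0) client: HALT it, then send PORT_DELETE and
 * poll the default status block producer for its completion.  A timeout
 * here is tolerated since the chip is about to be reset anyway.
 */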
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07004978static int bnx2x_stop_leading(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004979{
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00004980 __le16 dsb_sp_prod_idx;
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004981 /* if the other port is handling traffic,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004982 this can take a lot of time */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004983 int cnt = 500;
4984 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985
4986 might_sleep();
4987
4988 /* Send HALT ramrod */
4989 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
Eilon Greenstein0626b892009-02-12 08:38:14 +00004990 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004991
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004992 /* Wait for completion */
4993 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
4994 &(bp->fp[0].state), 1);
4995 if (rc) /* timeout */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07004996 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004997
Eliezer Tamir49d66772008-02-28 11:53:13 -08004998 dsb_sp_prod_idx = *bp->dsb_sp_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004999
Eliezer Tamir228241e2008-02-28 11:56:57 -08005000 /* Send PORT_DELETE ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005001 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5002
Eliezer Tamir49d66772008-02-28 11:53:13 -08005003	/* Wait for the completion to arrive on the default status block;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005004	   we are going to reset the chip anyway,
 5005	   so there is not much to do if this times out
5006 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005007 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005008 if (!cnt) {
5009 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5010 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5011 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5012#ifdef BNX2X_STOP_ON_ERROR
5013 bnx2x_panic();
5014#endif
Eilon Greenstein36e552a2009-02-12 08:37:21 +00005015 rc = -EBUSY;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005016 break;
5017 }
5018 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005019 msleep(1);
Eilon Greenstein5650d9d2009-01-22 06:01:29 +00005020 rmb(); /* Refresh the dsb_sp_prod */
Eliezer Tamir49d66772008-02-28 11:53:13 -08005021 }
5022 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5023 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005024
5025 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005026}
5027
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005028static void bnx2x_reset_func(struct bnx2x *bp)
5029{
5030 int port = BP_PORT(bp);
5031 int func = BP_FUNC(bp);
5032 int base, i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08005033
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005034 /* Configure IGU */
5035 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5036 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5037
Michael Chan37b091b2009-10-10 13:46:55 +00005038#ifdef BCM_CNIC
5039 /* Disable Timer scan */
5040 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5041 /*
 5042	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
5043 * complete
5044 */
5045 for (i = 0; i < 200; i++) {
5046 msleep(10);
5047 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5048 break;
5049 }
5050#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005051 /* Clear ILT */
5052 base = FUNC_ILT_BASE(func);
5053 for (i = base; i < base + ILT_PER_FUNC; i++)
5054 bnx2x_ilt_wr(bp, i, 0);
5055}
5056
5057static void bnx2x_reset_port(struct bnx2x *bp)
5058{
5059 int port = BP_PORT(bp);
5060 u32 val;
5061
5062 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5063
5064 /* Do not rcv packets to BRB */
5065 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5066 /* Do not direct rcv packets that are not for MCP to the BRB */
5067 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5068 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5069
5070 /* Configure AEU */
5071 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5072
5073 msleep(100);
5074 /* Check for BRB port occupancy */
5075 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5076 if (val)
5077 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07005078 "BRB1 is not empty %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005079
5080 /* TODO: Close Doorbell port? */
5081}
5082
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005083static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5084{
5085 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5086 BP_FUNC(bp), reset_code);
5087
5088 switch (reset_code) {
5089 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5090 bnx2x_reset_port(bp);
5091 bnx2x_reset_func(bp);
5092 bnx2x_reset_common(bp);
5093 break;
5094
5095 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5096 bnx2x_reset_port(bp);
5097 bnx2x_reset_func(bp);
5098 break;
5099
5100 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5101 bnx2x_reset_func(bp);
5102 break;
5103
5104 default:
5105 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5106 break;
5107 }
5108}
5109
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005110void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005111{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005112 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005113 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005114 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005115
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005116 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005117 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08005118 struct bnx2x_fastpath *fp = &bp->fp[i];
5119
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005120 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08005121 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005122
Eilon Greenstein7961f792009-03-02 07:59:31 +00005123 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005124 if (!cnt) {
5125 BNX2X_ERR("timeout waiting for queue[%d]\n",
5126 i);
5127#ifdef BNX2X_STOP_ON_ERROR
5128 bnx2x_panic();
5129 return -EBUSY;
5130#else
5131 break;
5132#endif
5133 }
5134 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005135 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005136 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08005137 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005138 /* Give HW time to discard old tx messages */
5139 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005140
Yitchak Gertner65abd742008-08-25 15:26:24 -07005141 if (CHIP_IS_E1(bp)) {
5142 struct mac_configuration_cmd *config =
5143 bnx2x_sp(bp, mcast_config);
5144
Michael Chane665bfd2009-10-10 13:46:54 +00005145 bnx2x_set_eth_mac_addr_e1(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005146
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005147 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07005148 CAM_INVALIDATE(config->config_table[i]);
5149
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005150 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07005151 if (CHIP_REV_IS_SLOW(bp))
5152 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5153 else
5154 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00005155 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07005156 config->hdr.reserved1 = 0;
5157
Michael Chane665bfd2009-10-10 13:46:54 +00005158 bp->set_mac_pending++;
5159 smp_wmb();
5160
Yitchak Gertner65abd742008-08-25 15:26:24 -07005161 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5162 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5163 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5164
5165 } else { /* E1H */
5166 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5167
Michael Chane665bfd2009-10-10 13:46:54 +00005168 bnx2x_set_eth_mac_addr_e1h(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005169
5170 for (i = 0; i < MC_HASH_SIZE; i++)
5171 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005172
5173 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005174 }
Michael Chan993ac7b2009-10-10 13:46:56 +00005175#ifdef BCM_CNIC
5176 /* Clear iSCSI L2 MAC */
5177 mutex_lock(&bp->cnic_mutex);
5178 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5179 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5180 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5181 }
5182 mutex_unlock(&bp->cnic_mutex);
5183#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07005184
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005185 if (unload_mode == UNLOAD_NORMAL)
5186 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005187
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005188 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005189 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005190
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005191 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005192 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005193 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005194 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005195 /* The mac address is written to entries 1-4 to
5196 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005197 u8 entry = (BP_E1HVN(bp) + 1)*8;
5198
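		/* Pack the 6-byte MAC into the two 32-bit MAC_MATCH words:
		 * bytes 0-1 go into the first word, bytes 2-5 into the
		 * second, most significant byte first.
		 */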
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005199 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005200 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005201
5202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5203 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005204 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005205
5206 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005207
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005208 } else
5209 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5210
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005211 /* Close multi and leading connections
5212 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005213 for_each_nondefault_queue(bp, i)
5214 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08005215 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005216
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005217 rc = bnx2x_stop_leading(bp);
5218 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005219 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005220#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005221 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005222#else
5223 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005224#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08005225 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005226
Eliezer Tamir228241e2008-02-28 11:56:57 -08005227unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005228 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08005229 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005230 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00005231 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005232 load_count[0], load_count[1], load_count[2]);
5233 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005234 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00005235 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005236 load_count[0], load_count[1], load_count[2]);
5237 if (load_count[0] == 0)
5238 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005239 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005240 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5241 else
5242 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5243 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005244
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005245 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5246 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5247 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005248
5249 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08005250 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005251
5252 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005253 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005254 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005255
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005256}
5257
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005258void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005259{
5260 u32 val;
5261
5262 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5263
5264 if (CHIP_IS_E1(bp)) {
5265 int port = BP_PORT(bp);
5266 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5267 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5268
5269 val = REG_RD(bp, addr);
5270 val &= ~(0x300);
5271 REG_WR(bp, addr, val);
5272 } else if (CHIP_IS_E1H(bp)) {
5273 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5274 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5275 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5276 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5277 }
5278}
5279
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005280
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005281/* Close gates #2, #3 and #4: */
5282static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5283{
5284 u32 val, addr;
5285
5286 /* Gates #2 and #4a are closed/opened for "not E1" only */
5287 if (!CHIP_IS_E1(bp)) {
5288 /* #4 */
5289 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5290 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5291 close ? (val | 0x1) : (val & (~(u32)1)));
5292 /* #2 */
5293 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5294 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5295 close ? (val | 0x1) : (val & (~(u32)1)));
5296 }
5297
5298 /* #3 */
5299 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5300 val = REG_RD(bp, addr);
5301 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5302
5303 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5304 close ? "closing" : "opening");
5305 mmiowb();
5306}
5307
5308#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5309
5310static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5311{
 5312	/* Save the old value of the `magic' bit and set it, so the MF configuration survives the MCP reset */
5313 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5314 *magic_val = val & SHARED_MF_CLP_MAGIC;
5315 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5316}
5317
5318/* Restore the value of the `magic' bit.
5319 *
 5320 * @param bp		Device handle.
5321 * @param magic_val Old value of the `magic' bit.
5322 */
5323static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5324{
5325 /* Restore the `magic' bit value... */
5326 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5327 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5328 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5329 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5330 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5331 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5332}
5333
5334/* Prepares for MCP reset: takes care of CLP configurations.
5335 *
 5336 * @param bp		Device handle.
 5337 * @param magic_val	Pointer where the old value of the 'magic' bit is saved.
5338 */
5339static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5340{
5341 u32 shmem;
5342 u32 validity_offset;
5343
5344 DP(NETIF_MSG_HW, "Starting\n");
5345
5346 /* Set `magic' bit in order to save MF config */
5347 if (!CHIP_IS_E1(bp))
5348 bnx2x_clp_reset_prep(bp, magic_val);
5349
5350 /* Get shmem offset */
5351 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5352 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5353
5354 /* Clear validity map flags */
5355 if (shmem > 0)
5356 REG_WR(bp, shmem + validity_offset, 0);
5357}
5358
5359#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5360#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5361
5362/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5363 * depending on the HW type.
5364 *
5365 * @param bp
5366 */
5367static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5368{
5369 /* special handling for emulation and FPGA,
5370 wait 10 times longer */
5371 if (CHIP_REV_IS_SLOW(bp))
5372 msleep(MCP_ONE_TIMEOUT*10);
5373 else
5374 msleep(MCP_ONE_TIMEOUT);
5375}
5376
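/*
 * Wait for the MCP to come back after the reset: poll the shmem validity
 * map until both the DEV_INFO and MB signatures are present or MCP_TIMEOUT
 * expires, then restore the CLP `magic' bit saved by bnx2x_reset_mcp_prep().
 * Returns 0 on success and -ENOTTY if the MCP did not come up.
 */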
5377static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5378{
5379 u32 shmem, cnt, validity_offset, val;
5380 int rc = 0;
5381
5382 msleep(100);
5383
5384 /* Get shmem offset */
5385 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5386 if (shmem == 0) {
5387 BNX2X_ERR("Shmem 0 return failure\n");
5388 rc = -ENOTTY;
5389 goto exit_lbl;
5390 }
5391
5392 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5393
5394 /* Wait for MCP to come up */
5395 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
 5396		/* TBD: it's best to check the validity map of the last port;
 5397		 * currently this checks port 0.
5398 */
5399 val = REG_RD(bp, shmem + validity_offset);
5400 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5401 shmem + validity_offset, val);
5402
5403 /* check that shared memory is valid. */
5404 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5405 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5406 break;
5407
5408 bnx2x_mcp_wait_one(bp);
5409 }
5410
5411 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5412
5413 /* Check that shared memory is valid. This indicates that MCP is up. */
5414 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5415 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5416 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5417 rc = -ENOTTY;
5418 goto exit_lbl;
5419 }
5420
5421exit_lbl:
5422 /* Restore the `magic' bit value */
5423 if (!CHIP_IS_E1(bp))
5424 bnx2x_clp_reset_done(bp, magic_val);
5425
5426 return rc;
5427}
5428
5429static void bnx2x_pxp_prep(struct bnx2x *bp)
5430{
5431 if (!CHIP_IS_E1(bp)) {
5432 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5433 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5434 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5435 mmiowb();
5436 }
5437}
5438
5439/*
5440 * Reset the whole chip except for:
5441 * - PCIE core
5442 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5443 * one reset bit)
5444 * - IGU
5445 * - MISC (including AEU)
5446 * - GRC
5447 * - RBCN, RBCP
5448 */
5449static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5450{
5451 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5452
5453 not_reset_mask1 =
5454 MISC_REGISTERS_RESET_REG_1_RST_HC |
5455 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5456 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5457
5458 not_reset_mask2 =
5459 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5460 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5461 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5462 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5463 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5464 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5465 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5466 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5467
5468 reset_mask1 = 0xffffffff;
5469
5470 if (CHIP_IS_E1(bp))
5471 reset_mask2 = 0xffff;
5472 else
5473 reset_mask2 = 0x1ffff;
5474
5475 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5476 reset_mask1 & (~not_reset_mask1));
5477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5478 reset_mask2 & (~not_reset_mask2));
5479
5480 barrier();
5481 mmiowb();
5482
5483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5484 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5485 mmiowb();
5486}
5487
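/*
 * "Process kill": the hard recovery path run by the recovery leader.  The
 * sequence below drains the PXP Tetris buffer (waiting up to 1s), closes
 * gates #2, #3 and #4, clears the "unprepared" bit, prepares the MCP
 * (saves the CLP `magic' bit and clears the shmem validity map) and the
 * PXP, asserts the chip reset, waits for the MCP to come back up and
 * finally reopens the gates.
 */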
5488static int bnx2x_process_kill(struct bnx2x *bp)
5489{
5490 int cnt = 1000;
5491 u32 val = 0;
5492 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5493
5494
5495 /* Empty the Tetris buffer, wait for 1s */
5496 do {
5497 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5498 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5499 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5500 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5501 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5502 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5503 ((port_is_idle_0 & 0x1) == 0x1) &&
5504 ((port_is_idle_1 & 0x1) == 0x1) &&
5505 (pgl_exp_rom2 == 0xffffffff))
5506 break;
5507 msleep(1);
5508 } while (cnt-- > 0);
5509
5510 if (cnt <= 0) {
5511 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5512 " are still"
5513 " outstanding read requests after 1s!\n");
5514 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5515 " port_is_idle_0=0x%08x,"
5516 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5517 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5518 pgl_exp_rom2);
5519 return -EAGAIN;
5520 }
5521
5522 barrier();
5523
5524 /* Close gates #2, #3 and #4 */
5525 bnx2x_set_234_gates(bp, true);
5526
5527 /* TBD: Indicate that "process kill" is in progress to MCP */
5528
5529 /* Clear "unprepared" bit */
5530 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5531 barrier();
5532
5533 /* Make sure all is written to the chip before the reset */
5534 mmiowb();
5535
5536 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5537 * PSWHST, GRC and PSWRD Tetris buffer.
5538 */
5539 msleep(1);
5540
 5541	/* Prepare for chip reset: */
5542 /* MCP */
5543 bnx2x_reset_mcp_prep(bp, &val);
5544
5545 /* PXP */
5546 bnx2x_pxp_prep(bp);
5547 barrier();
5548
5549 /* reset the chip */
5550 bnx2x_process_kill_chip_reset(bp);
5551 barrier();
5552
5553 /* Recover after reset: */
5554 /* MCP */
5555 if (bnx2x_reset_mcp_comp(bp, val))
5556 return -EAGAIN;
5557
5558 /* PXP */
5559 bnx2x_pxp_prep(bp);
5560
5561 /* Open the gates #2, #3 and #4 */
5562 bnx2x_set_234_gates(bp, false);
5563
 5564	/* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
 5565	 * reset state, re-enable attentions. */
5566
5567 return 0;
5568}
5569
5570static int bnx2x_leader_reset(struct bnx2x *bp)
5571{
5572 int rc = 0;
5573 /* Try to recover after the failure */
5574 if (bnx2x_process_kill(bp)) {
 5575		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5576 bp->dev->name);
5577 rc = -EAGAIN;
5578 goto exit_leader_reset;
5579 }
5580
5581 /* Clear "reset is in progress" bit and update the driver state */
5582 bnx2x_set_reset_done(bp);
5583 bp->recovery_state = BNX2X_RECOVERY_DONE;
5584
5585exit_leader_reset:
5586 bp->is_leader = 0;
5587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5588 smp_wmb();
5589 return rc;
5590}
5591
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005592/* Assumption: runs under rtnl lock. This together with the fact
 5593 * that it's called only from bnx2x_reset_task() ensures that it
5594 * will never be called when netif_running(bp->dev) is false.
5595 */
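/*
 * Recovery state machine, in brief: in BNX2X_RECOVERY_INIT the function
 * tries to take the LEADER_LOCK, unloads the NIC and moves to
 * BNX2X_RECOVERY_WAIT.  In the WAIT state the leader waits for the global
 * load count to reach zero, runs the "process kill" reset and reloads;
 * a non-leader either waits for the leader to finish and then reloads, or
 * takes over leadership if the lock becomes free.  If recovery fails the
 * device is detached and powered down until a power cycle.
 */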
5596static void bnx2x_parity_recover(struct bnx2x *bp)
5597{
5598 DP(NETIF_MSG_HW, "Handling parity\n");
5599 while (1) {
5600 switch (bp->recovery_state) {
5601 case BNX2X_RECOVERY_INIT:
5602 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5603 /* Try to get a LEADER_LOCK HW lock */
5604 if (bnx2x_trylock_hw_lock(bp,
5605 HW_LOCK_RESOURCE_RESERVED_08))
5606 bp->is_leader = 1;
5607
5608 /* Stop the driver */
5609 /* If interface has been removed - break */
5610 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5611 return;
5612
5613 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5614 /* Ensure "is_leader" and "recovery_state"
5615 * update values are seen on other CPUs
5616 */
5617 smp_wmb();
5618 break;
5619
5620 case BNX2X_RECOVERY_WAIT:
5621 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5622 if (bp->is_leader) {
5623 u32 load_counter = bnx2x_get_load_cnt(bp);
5624 if (load_counter) {
5625 /* Wait until all other functions get
5626 * down.
5627 */
5628 schedule_delayed_work(&bp->reset_task,
5629 HZ/10);
5630 return;
5631 } else {
5632 /* If all other functions got down -
5633 * try to bring the chip back to
5634 * normal. In any case it's an exit
5635 * point for a leader.
5636 */
5637 if (bnx2x_leader_reset(bp) ||
5638 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5639 printk(KERN_ERR"%s: Recovery "
5640 "has failed. Power cycle is "
5641 "needed.\n", bp->dev->name);
5642 /* Disconnect this device */
5643 netif_device_detach(bp->dev);
5644 /* Block ifup for all function
5645 * of this ASIC until
5646 * "process kill" or power
5647 * cycle.
5648 */
5649 bnx2x_set_reset_in_progress(bp);
5650 /* Shut down the power */
5651 bnx2x_set_power_state(bp,
5652 PCI_D3hot);
5653 return;
5654 }
5655
5656 return;
5657 }
5658 } else { /* non-leader */
5659 if (!bnx2x_reset_is_done(bp)) {
 5660				/* Try to get the LEADER_LOCK HW lock,
 5661				 * since the former leader may have been
 5662				 * unloaded by the user or may have
 5663				 * released leadership for some other
 5664				 * reason.
5665 */
5666 if (bnx2x_trylock_hw_lock(bp,
5667 HW_LOCK_RESOURCE_RESERVED_08)) {
5668 /* I'm a leader now! Restart a
5669 * switch case.
5670 */
5671 bp->is_leader = 1;
5672 break;
5673 }
5674
5675 schedule_delayed_work(&bp->reset_task,
5676 HZ/10);
5677 return;
5678
5679 } else { /* A leader has completed
5680 * the "process kill". It's an exit
5681 * point for a non-leader.
5682 */
5683 bnx2x_nic_load(bp, LOAD_NORMAL);
5684 bp->recovery_state =
5685 BNX2X_RECOVERY_DONE;
5686 smp_wmb();
5687 return;
5688 }
5689 }
5690 default:
5691 return;
5692 }
5693 }
5694}
5695
 5696/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
 5697 * scheduled on the generic workqueue in order to prevent a deadlock.
5698 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005699static void bnx2x_reset_task(struct work_struct *work)
5700{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005701 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005702
5703#ifdef BNX2X_STOP_ON_ERROR
5704 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5705 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005706 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005707 return;
5708#endif
5709
5710 rtnl_lock();
5711
5712 if (!netif_running(bp->dev))
5713 goto reset_task_exit;
5714
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005715 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5716 bnx2x_parity_recover(bp);
5717 else {
5718 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5719 bnx2x_nic_load(bp, LOAD_NORMAL);
5720 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005721
5722reset_task_exit:
5723 rtnl_unlock();
5724}
5725
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005726/* end of nic load/unload */
5727
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005728/*
5729 * Init service functions
5730 */
5731
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00005732static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5733{
5734 switch (func) {
5735 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5736 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5737 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5738 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5739 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5740 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5741 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5742 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5743 default:
5744 BNX2X_ERR("Unsupported function index: %d\n", func);
5745 return (u32)(-1);
5746 }
5747}
5748
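/*
 * Disable interrupts on E1H while a pre-boot (UNDI) driver may still own
 * the device: pretend to be function 0 through the PGL pretend register,
 * disable interrupts in this "like-E1" mode, then restore the original
 * function setting.
 */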
5749static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5750{
5751 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5752
5753 /* Flush all outstanding writes */
5754 mmiowb();
5755
5756 /* Pretend to be function 0 */
5757 REG_WR(bp, reg, 0);
5758 /* Flush the GRC transaction (in the chip) */
5759 new_val = REG_RD(bp, reg);
5760 if (new_val != 0) {
5761 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5762 new_val);
5763 BUG();
5764 }
5765
5766 /* From now we are in the "like-E1" mode */
5767 bnx2x_int_disable(bp);
5768
5769 /* Flush all outstanding writes */
5770 mmiowb();
5771
 5772	/* Restore the original function settings */
5773 REG_WR(bp, reg, orig_func);
5774 new_val = REG_RD(bp, reg);
5775 if (new_val != orig_func) {
5776 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5777 orig_func, new_val);
5778 BUG();
5779 }
5780}
5781
5782static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5783{
5784 if (CHIP_IS_E1H(bp))
5785 bnx2x_undi_int_disable_e1h(bp, func);
5786 else
5787 bnx2x_int_disable(bp);
5788}
5789
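/*
 * If a pre-boot (UNDI) driver left the device initialized (the
 * "unprepared" bit is set and the normal doorbell CID offset reads 0x7),
 * request an unload from the MCP for the port(s) it used, quiesce the
 * BRB/NIG and AEU, reset the device while preserving the NIG port-swap
 * straps, report UNLOAD_DONE and restore our function and fw_seq.
 */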
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005790static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005791{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005792 u32 val;
5793
5794 /* Check if there is any driver already loaded */
5795 val = REG_RD(bp, MISC_REG_UNPREPARED);
5796 if (val == 0x1) {
5797 /* Check if it is the UNDI driver
5798 * UNDI driver initializes CID offset for normal bell to 0x7
5799 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07005800 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005801 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5802 if (val == 0x7) {
5803 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005804 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005805 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005806 u32 swap_en;
5807 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005808
Eilon Greensteinb4661732009-01-14 06:43:56 +00005809 /* clear the UNDI indication */
5810 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5811
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005812 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5813
5814 /* try unload UNDI on port 0 */
5815 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005816 bp->fw_seq =
5817 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5818 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005819 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005820
5821 /* if UNDI is loaded on the other port */
5822 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5823
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005824 /* send "DONE" for previous unload */
5825 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5826
5827 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005828 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005829 bp->fw_seq =
5830 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5831 DRV_MSG_SEQ_NUMBER_MASK);
5832 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005833
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005834 bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005835 }
5836
Eilon Greensteinb4661732009-01-14 06:43:56 +00005837 /* now it's safe to release the lock */
5838 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5839
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00005840 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005841
5842 /* close input traffic and wait for it */
5843 /* Do not rcv packets to BRB */
5844 REG_WR(bp,
5845 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5846 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5847 /* Do not direct rcv packets that are not for MCP to
5848 * the BRB */
5849 REG_WR(bp,
5850 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5851 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5852 /* clear AEU */
5853 REG_WR(bp,
5854 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5855 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5856 msleep(10);
5857
5858 /* save NIG port swap info */
5859 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5860 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005861 /* reset device */
5862 REG_WR(bp,
5863 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005864 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005865 REG_WR(bp,
5866 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5867 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005868 /* take the NIG out of reset and restore swap values */
5869 REG_WR(bp,
5870 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5871 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5872 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5873 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5874
5875 /* send unload done to the MCP */
5876 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5877
5878 /* restore our func and fw_seq */
5879 bp->func = func;
5880 bp->fw_seq =
5881 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5882 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00005883
5884 } else
5885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005886 }
5887}
5888
5889static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5890{
5891 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07005892 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005893
5894 /* Get the chip revision id and number. */
5895 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5896 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5897 id = ((val & 0xffff) << 16);
5898 val = REG_RD(bp, MISC_REG_CHIP_REV);
5899 id |= ((val & 0xf) << 12);
5900 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5901 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00005902 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005903 id |= (val & 0xf);
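	/* Worked example with illustrative (made-up) register values:
	 * chip num 0xabcd, rev 0x1, metal 0x02 and bond_id 0x3 yield
	 * chip_id 0xabcd1023.
	 */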
5904 bp->common.chip_id = id;
5905 bp->link_params.chip_id = bp->common.chip_id;
5906 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5907
Eilon Greenstein1c063282009-02-12 08:36:43 +00005908 val = (REG_RD(bp, 0x2874) & 0x55);
5909 if ((bp->common.chip_id & 0x1) ||
5910 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5911 bp->flags |= ONE_PORT_FLAG;
5912 BNX2X_DEV_INFO("single port device\n");
5913 }
5914
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005915 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5916 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5917 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5918 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5919 bp->common.flash_size, bp->common.flash_size);
5920
5921 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Eilon Greenstein2691d512009-08-12 08:22:08 +00005922 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005923 bp->link_params.shmem_base = bp->common.shmem_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00005924 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5925 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005926
5927 if (!bp->common.shmem_base ||
5928 (bp->common.shmem_base < 0xA0000) ||
5929 (bp->common.shmem_base >= 0xC0000)) {
5930 BNX2X_DEV_INFO("MCP not active\n");
5931 bp->flags |= NO_MCP_FLAG;
5932 return;
5933 }
5934
5935 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5936 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5937 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005938 BNX2X_ERROR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005939
5940 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00005941 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005942
5943 bp->link_params.hw_led_mode = ((bp->common.hw_config &
5944 SHARED_HW_CFG_LED_MODE_MASK) >>
5945 SHARED_HW_CFG_LED_MODE_SHIFT);
5946
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00005947 bp->link_params.feature_config_flags = 0;
5948 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5949 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
5950 bp->link_params.feature_config_flags |=
5951 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
5952 else
5953 bp->link_params.feature_config_flags &=
5954 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
5955
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005956 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
5957 bp->common.bc_ver = val;
5958 BNX2X_DEV_INFO("bc_ver %X\n", val);
5959 if (val < BNX2X_BC_VER) {
5960 /* for now only warn
5961 * later we might need to enforce this */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005962 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
5963 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005964 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005965 bp->link_params.feature_config_flags |=
5966 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
5967 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07005968
5969 if (BP_E1HVN(bp) == 0) {
5970 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
5971 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
5972 } else {
5973 /* no WOL capability for E1HVN != 0 */
5974 bp->flags |= NO_WOL_FLAG;
5975 }
5976 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00005977 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005978
5979 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5980 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5981 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5982 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5983
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005984 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
5985 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005986}
5987
5988static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
5989 u32 switch_cfg)
5990{
5991 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005992 bp->port.supported = 0;
5993 switch (bp->link_params.num_phys) {
5994 case 1:
5995 bp->port.supported = bp->link_params.phy[INT_PHY].supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005996 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005997 case 2:
5998 bp->port.supported = bp->link_params.phy[EXT_PHY1].supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005999 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006000 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006001
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006002 if (!(bp->port.supported)) {
6003 BNX2X_ERR("NVRAM config error. BAD phy config."
6004 "PHY1 config 0x%x\n",
6005 SHMEM_RD(bp,
6006 dev_info.port_hw_config[port].external_phy_config));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006007 return;
6008 }
6009
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006010 switch (switch_cfg) {
6011 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006012 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6013 port*0x10);
6014 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006015 break;
6016
6017 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006018 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6019 port*0x18);
6020 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006021
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006022 break;
6023
6024 default:
6025 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006026 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006027 return;
6028 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006029 /* mask what we support according to speed_cap_mask */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006030 if (!(bp->link_params.speed_cap_mask &
6031 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006032 bp->port.supported &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006033
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006034 if (!(bp->link_params.speed_cap_mask &
6035 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006036 bp->port.supported &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006037
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006038 if (!(bp->link_params.speed_cap_mask &
6039 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006040 bp->port.supported &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006041
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006042 if (!(bp->link_params.speed_cap_mask &
6043 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006044 bp->port.supported &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006045
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006046 if (!(bp->link_params.speed_cap_mask &
6047 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006048 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6049 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006050
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006051 if (!(bp->link_params.speed_cap_mask &
6052 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006053 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006054
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006055 if (!(bp->link_params.speed_cap_mask &
6056 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006057 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006058
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006059 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006060}
6061
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006062static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006063{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006064 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006065
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006066 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006067 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006068 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006069 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006070 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006071 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006072 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006073 bp->link_params.req_line_speed = SPEED_10000;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006074 bp->port.advertising = (ADVERTISED_10000baseT_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006075 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006076 }
6077 break;
6078
6079 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006080 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006081 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006082 bp->port.advertising = (ADVERTISED_10baseT_Full |
6083 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006084 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006085 BNX2X_ERROR("NVRAM config error. "
6086 "Invalid link_config 0x%x"
6087 " speed_cap_mask 0x%x\n",
6088 bp->port.link_config,
6089 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006090 return;
6091 }
6092 break;
6093
6094 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006095 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006096 bp->link_params.req_line_speed = SPEED_10;
6097 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006098 bp->port.advertising = (ADVERTISED_10baseT_Half |
6099 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006100 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006101 BNX2X_ERROR("NVRAM config error. "
6102 "Invalid link_config 0x%x"
6103 " speed_cap_mask 0x%x\n",
6104 bp->port.link_config,
6105 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006106 return;
6107 }
6108 break;
6109
6110 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006111 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006112 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006113 bp->port.advertising = (ADVERTISED_100baseT_Full |
6114 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006115 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006116 BNX2X_ERROR("NVRAM config error. "
6117 "Invalid link_config 0x%x"
6118 " speed_cap_mask 0x%x\n",
6119 bp->port.link_config,
6120 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006121 return;
6122 }
6123 break;
6124
6125 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006126 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006127 bp->link_params.req_line_speed = SPEED_100;
6128 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006129 bp->port.advertising = (ADVERTISED_100baseT_Half |
6130 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006131 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006132 BNX2X_ERROR("NVRAM config error. "
6133 "Invalid link_config 0x%x"
6134 " speed_cap_mask 0x%x\n",
6135 bp->port.link_config,
6136 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006137 return;
6138 }
6139 break;
6140
6141 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006142 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006143 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006144 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6145 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006146 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006147 BNX2X_ERROR("NVRAM config error. "
6148 "Invalid link_config 0x%x"
6149 " speed_cap_mask 0x%x\n",
6150 bp->port.link_config,
6151 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006152 return;
6153 }
6154 break;
6155
6156 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006157 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006158 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006159 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6160 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006161 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006162 BNX2X_ERROR("NVRAM config error. "
6163 "Invalid link_config 0x%x"
6164 " speed_cap_mask 0x%x\n",
6165 bp->port.link_config,
6166 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006167 return;
6168 }
6169 break;
6170
6171 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6172 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6173 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006174 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006175 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006176 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6177 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006178 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006179 BNX2X_ERROR("NVRAM config error. "
6180 "Invalid link_config 0x%x"
6181 " speed_cap_mask 0x%x\n",
6182 bp->port.link_config,
6183 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006184 return;
6185 }
6186 break;
6187
6188 default:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006189 BNX2X_ERROR("NVRAM config error. "
6190 "BAD link speed link_config 0x%x\n",
6191 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006192 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006193 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006194 break;
6195 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006196
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006197 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6198 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08006199 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07006200 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08006201 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006202
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006203 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08006204 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006205 bp->link_params.req_line_speed,
6206 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006207 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006208}
6209
Michael Chane665bfd2009-10-10 13:46:54 +00006210static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6211{
6212 mac_hi = cpu_to_be16(mac_hi);
6213 mac_lo = cpu_to_be32(mac_lo);
6214 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6215 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6216}
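/*
 * Usage sketch with illustrative (made-up) shmem values; the real words
 * are read in bnx2x_get_port_hwinfo() below:
 *
 *	u8 mac[ETH_ALEN];
 *
 *	bnx2x_set_mac_buf(mac, 0x9abcdef0, 0x1234);
 *	mac[] now holds 12:34:9a:bc:de:f0 (big-endian/network order)
 */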
6217
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006218static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006219{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006220 int port = BP_PORT(bp);
6221 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00006222 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006223	u32 ext_phy_type, ext_phy_config;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006224
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006225 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006226 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006227
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006228 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006229 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006230
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006231 bp->link_params.speed_cap_mask =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006232 SHMEM_RD(bp,
6233 dev_info.port_hw_config[port].speed_capability_mask);
6234
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006235 bp->port.link_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006236 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6237
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00006238
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00006239 /* If the device is capable of WoL, set the default state according
6240 * to the HW
6241 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006242 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00006243 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6244 (config & PORT_FEATURE_WOL_ENABLED));
6245
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006246 BNX2X_DEV_INFO("lane_config 0x%08x"
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00006247 " speed_cap_mask 0x%08x link_config 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006248 bp->link_params.lane_config,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006249 bp->link_params.speed_cap_mask, bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006250
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006251 bp->link_params.switch_cfg |= (bp->port.link_config &
6252 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006253 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006254 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006255
6256 bnx2x_link_settings_requested(bp);
6257
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006258 /*
6259 * If connected directly, work with the internal PHY, otherwise, work
6260 * with the external PHY
6261 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006262 ext_phy_config =
6263 SHMEM_RD(bp,
6264 dev_info.port_hw_config[port].external_phy_config);
6265 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006266 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006267 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006268
6269 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6270 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6271 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006272 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006273
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006274 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6275 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00006276 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006277 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6278 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00006279
6280#ifdef BCM_CNIC
6281 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6282 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6283 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6284#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006285}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006286
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006287static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6288{
6289 int func = BP_FUNC(bp);
6290 u32 val, val2;
6291 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006292
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006293 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006294
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006295 bp->e1hov = 0;
6296 bp->e1hmf = 0;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00006297 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006298 bp->mf_config =
6299 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006300
Eilon Greenstein2691d512009-08-12 08:22:08 +00006301 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07006302 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00006303 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006304 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00006305 BNX2X_DEV_INFO("%s function mode\n",
6306 IS_E1HMF(bp) ? "multi" : "single");
6307
6308 if (IS_E1HMF(bp)) {
6309 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6310 e1hov_tag) &
6311 FUNC_MF_CFG_E1HOV_TAG_MASK);
6312 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6313 bp->e1hov = val;
6314 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6315 "(0x%04x)\n",
6316 func, bp->e1hov, bp->e1hov);
6317 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006318 BNX2X_ERROR("No valid E1HOV for func %d,"
6319 " aborting\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006320 rc = -EPERM;
6321 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00006322 } else {
6323 if (BP_E1HVN(bp)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006324 BNX2X_ERROR("VN %d in single function mode,"
6325 " aborting\n", BP_E1HVN(bp));
Eilon Greenstein2691d512009-08-12 08:22:08 +00006326 rc = -EPERM;
6327 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006328 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006329 }
6330
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006331 if (!BP_NOMCP(bp)) {
6332 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006333
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006334 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6335 DRV_MSG_SEQ_NUMBER_MASK);
6336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6337 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006338
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006339 if (IS_E1HMF(bp)) {
6340 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6341 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6342 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6343 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6344 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6345 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6346 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6347 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6348 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6349 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6350 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6351 ETH_ALEN);
6352 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6353 ETH_ALEN);
6354 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006355
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006356 return rc;
6357 }
6358
6359 if (BP_NOMCP(bp)) {
6360 /* only supposed to happen on emulation/FPGA */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006361 BNX2X_ERROR("warning: random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006362 random_ether_addr(bp->dev->dev_addr);
6363 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6364 }
6365
6366 return rc;
6367}
6368
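/*
 * Illustrative sketch (not part of the driver): how bnx2x_get_hwinfo()
 * above assembles the E1HMF MAC address from the two shmem words.  The
 * values below are made up for the example.
 *
 *   mac_upper (val2) = 0x0000001a   - two most significant bytes in bits 15:0
 *   mac_lower (val)  = 0x2b3c4d5e   - remaining four bytes
 *
 *   dev_addr[] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e }
 *              -> 00:1a:2b:3c:4d:5e
 */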
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00006369static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6370{
6371 int cnt, i, block_end, rodi;
6372 char vpd_data[BNX2X_VPD_LEN+1];
6373 char str_id_reg[VENDOR_ID_LEN+1];
6374 char str_id_cap[VENDOR_ID_LEN+1];
6375 u8 len;
6376
6377 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6378 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6379
6380 if (cnt < BNX2X_VPD_LEN)
6381 goto out_not_found;
6382
6383 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6384 PCI_VPD_LRDT_RO_DATA);
6385 if (i < 0)
6386 goto out_not_found;
6387
6388
6389 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6390 pci_vpd_lrdt_size(&vpd_data[i]);
6391
6392 i += PCI_VPD_LRDT_TAG_SIZE;
6393
6394 if (block_end > BNX2X_VPD_LEN)
6395 goto out_not_found;
6396
6397 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6398 PCI_VPD_RO_KEYWORD_MFR_ID);
6399 if (rodi < 0)
6400 goto out_not_found;
6401
6402 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6403
6404 if (len != VENDOR_ID_LEN)
6405 goto out_not_found;
6406
6407 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6408
6409 /* vendor specific info */
6410 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6411 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6412 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6413 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6414
6415 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6416 PCI_VPD_RO_KEYWORD_VENDOR0);
6417 if (rodi >= 0) {
6418 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6419
6420 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6421
6422 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6423 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6424 bp->fw_ver[len] = ' ';
6425 }
6426 }
6427 return;
6428 }
6429out_not_found:
6430 return;
6431}
6432
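/*
 * Rough outline (comment only, summarized from the function above) of the
 * VPD walk performed by bnx2x_read_fwinfo():
 *
 *   1. Read BNX2X_VPD_LEN bytes of VPD data from the device.
 *   2. Locate the large-resource read-only tag (PCI_VPD_LRDT_RO_DATA).
 *   3. Inside it, find the manufacturer-ID keyword and compare it with the
 *      Dell vendor ID rendered as four hex characters ("%04x"/"%04X").
 *   4. On a match, copy the vendor-specific (VENDOR0) keyword, if present
 *      and shorter than 32 bytes, into bp->fw_ver.
 *
 * Anything that does not parse simply falls through to out_not_found and
 * leaves bp->fw_ver zeroed.
 */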
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006433static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6434{
6435 int func = BP_FUNC(bp);
Eilon Greenstein87942b42009-02-12 08:36:49 +00006436 int timer_interval;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006437 int rc;
6438
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006439 /* Disable interrupt handling until HW is initialized */
6440 atomic_set(&bp->intr_sem, 1);
Eilon Greensteine1510702009-07-21 05:47:41 +00006441 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006442
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006443 mutex_init(&bp->port.phy_mutex);
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07006444 mutex_init(&bp->fw_mb_mutex);
David S. Millerbb7e95c2010-07-27 21:01:35 -07006445 spin_lock_init(&bp->stats_lock);
Michael Chan993ac7b2009-10-10 13:46:56 +00006446#ifdef BCM_CNIC
6447 mutex_init(&bp->cnic_mutex);
6448#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006449
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08006450 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006451 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006452
6453 rc = bnx2x_get_hwinfo(bp);
6454
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00006455 bnx2x_read_fwinfo(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006456 /* need to reset chip if undi was active */
6457 if (!BP_NOMCP(bp))
6458 bnx2x_undi_unload(bp);
6459
6460 if (CHIP_REV_IS_FPGA(bp))
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006461 dev_err(&bp->pdev->dev, "FPGA detected\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006462
6463 if (BP_NOMCP(bp) && (func == 0))
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006464 dev_err(&bp->pdev->dev, "MCP disabled, "
6465 "must load devices in order!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006466
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006467 /* Set multi queue mode */
Eilon Greenstein8badd272009-02-12 08:36:15 +00006468 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6469 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006470 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6471 "requested is not MSI-X\n");
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006472 multi_mode = ETH_RSS_MODE_DISABLED;
6473 }
6474 bp->multi_mode = multi_mode;
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +00006475 bp->int_mode = int_mode;
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006476
Dmitry Kravkov4fd89b7a2010-04-01 19:45:34 -07006477 bp->dev->features |= NETIF_F_GRO;
6478
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006479 /* Set TPA flags */
6480 if (disable_tpa) {
6481 bp->flags &= ~TPA_ENABLE_FLAG;
6482 bp->dev->features &= ~NETIF_F_LRO;
6483 } else {
6484 bp->flags |= TPA_ENABLE_FLAG;
6485 bp->dev->features |= NETIF_F_LRO;
6486 }
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +00006487 bp->disable_tpa = disable_tpa;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006488
Eilon Greensteina18f5122009-08-12 08:23:26 +00006489 if (CHIP_IS_E1(bp))
6490 bp->dropless_fc = 0;
6491 else
6492 bp->dropless_fc = dropless_fc;
6493
Eilon Greenstein8d5726c2009-02-12 08:37:19 +00006494 bp->mrrs = mrrs;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006495
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006496 bp->tx_ring_size = MAX_TX_AVAIL;
6497 bp->rx_ring_size = MAX_RX_AVAIL;
6498
6499 bp->rx_csum = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006500
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00006501 /* make sure that the numbers are in the right granularity */
6502 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6503 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
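	/* The integer divide-and-multiply above rounds the requested
	 * coalescing values down to a multiple of 4 * BNX2X_BTR.
	 * For example, if 4 * BNX2X_BTR were 12 (hypothetical value,
	 * BNX2X_BTR itself is defined in the driver headers), 50 would
	 * round down to 48 and 25 to 24.
	 */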
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006504
Eilon Greenstein87942b42009-02-12 08:36:49 +00006505 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6506 bp->current_interval = (poll ? poll : timer_interval);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006507
6508 init_timer(&bp->timer);
6509 bp->timer.expires = jiffies + bp->current_interval;
6510 bp->timer.data = (unsigned long) bp;
6511 bp->timer.function = bnx2x_timer;
6512
6513 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006514}
6515
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006516
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00006517/****************************************************************************
6518* General service functions
6519****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006520
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07006521/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006522static int bnx2x_open(struct net_device *dev)
6523{
6524 struct bnx2x *bp = netdev_priv(dev);
6525
Eilon Greenstein6eccabb2009-01-22 03:37:48 +00006526 netif_carrier_off(dev);
6527
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006528 bnx2x_set_power_state(bp, PCI_D0);
6529
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006530 if (!bnx2x_reset_is_done(bp)) {
6531 do {
6532 			/* Reset the MCP mailbox sequence if there is an
6533 			 * ongoing recovery
6534 			 */
6535 bp->fw_seq = 0;
6536
6537 			/* If it's the first function to load and the reset is
6538 			 * still not marked done, a previous recovery may have been
6539 			 * left unfinished. We don't check the attention state here
6540 			 * because it may have already been cleared by a "common"
6541 			 * reset, but we shall proceed with "process kill" anyway.
6542 			 */
6543 if ((bnx2x_get_load_cnt(bp) == 0) &&
6544 bnx2x_trylock_hw_lock(bp,
6545 HW_LOCK_RESOURCE_RESERVED_08) &&
6546 (!bnx2x_leader_reset(bp))) {
6547 DP(NETIF_MSG_HW, "Recovered in open\n");
6548 break;
6549 }
6550
6551 bnx2x_set_power_state(bp, PCI_D3hot);
6552
6553 				printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6554 				       " completed yet. Try again later. If you still see this"
6555 				       " message after a few retries then a power cycle is"
6556 				       " required.\n", bp->dev->name);
6557
6558 return -EAGAIN;
6559 } while (0);
6560 }
6561
6562 bp->recovery_state = BNX2X_RECOVERY_DONE;
6563
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07006564 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006565}
6566
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07006567/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006568static int bnx2x_close(struct net_device *dev)
6569{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006570 struct bnx2x *bp = netdev_priv(dev);
6571
6572 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07006573 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00006574 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006575
6576 return 0;
6577}
6578
Eilon Greensteinf5372252009-02-12 08:38:30 +00006579/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006580void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006581{
6582 struct bnx2x *bp = netdev_priv(dev);
6583 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6584 int port = BP_PORT(bp);
6585
6586 if (bp->state != BNX2X_STATE_OPEN) {
6587 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6588 return;
6589 }
6590
6591 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6592
6593 if (dev->flags & IFF_PROMISC)
6594 rx_mode = BNX2X_RX_MODE_PROMISC;
6595
6596 else if ((dev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00006597 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6598 CHIP_IS_E1(bp)))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006599 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6600
6601 else { /* some multicasts */
6602 if (CHIP_IS_E1(bp)) {
6603 int i, old, offset;
Jiri Pirko22bedad32010-04-01 21:22:57 +00006604 struct netdev_hw_addr *ha;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006605 struct mac_configuration_cmd *config =
6606 bnx2x_sp(bp, mcast_config);
6607
Jiri Pirko0ddf4772010-02-20 00:13:58 +00006608 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00006609 netdev_for_each_mc_addr(ha, dev) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006610 config->config_table[i].
6611 cam_entry.msb_mac_addr =
Jiri Pirko22bedad32010-04-01 21:22:57 +00006612 swab16(*(u16 *)&ha->addr[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006613 config->config_table[i].
6614 cam_entry.middle_mac_addr =
Jiri Pirko22bedad32010-04-01 21:22:57 +00006615 swab16(*(u16 *)&ha->addr[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006616 config->config_table[i].
6617 cam_entry.lsb_mac_addr =
Jiri Pirko22bedad32010-04-01 21:22:57 +00006618 swab16(*(u16 *)&ha->addr[4]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006619 config->config_table[i].cam_entry.flags =
6620 cpu_to_le16(port);
6621 config->config_table[i].
6622 target_table_entry.flags = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07006623 config->config_table[i].target_table_entry.
6624 clients_bit_vector =
6625 cpu_to_le32(1 << BP_L_ID(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006626 config->config_table[i].
6627 target_table_entry.vlan_id = 0;
6628
6629 DP(NETIF_MSG_IFUP,
6630 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6631 config->config_table[i].
6632 cam_entry.msb_mac_addr,
6633 config->config_table[i].
6634 cam_entry.middle_mac_addr,
6635 config->config_table[i].
6636 cam_entry.lsb_mac_addr);
Jiri Pirko0ddf4772010-02-20 00:13:58 +00006637 i++;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006638 }
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006639 old = config->hdr.length;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006640 if (old > i) {
6641 for (; i < old; i++) {
6642 if (CAM_IS_INVALID(config->
6643 config_table[i])) {
Eilon Greensteinaf246402009-01-14 06:43:59 +00006644 /* already invalidated */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006645 break;
6646 }
6647 /* invalidate */
6648 CAM_INVALIDATE(config->
6649 config_table[i]);
6650 }
6651 }
6652
6653 if (CHIP_REV_IS_SLOW(bp))
6654 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6655 else
6656 offset = BNX2X_MAX_MULTICAST*(1 + port);
6657
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006658 config->hdr.length = i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006659 config->hdr.offset = offset;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006660 config->hdr.client_id = bp->fp->cl_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006661 config->hdr.reserved1 = 0;
6662
Michael Chane665bfd2009-10-10 13:46:54 +00006663 bp->set_mac_pending++;
6664 smp_wmb();
6665
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006666 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6667 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6668 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6669 0);
6670 } else { /* E1H */
6671 /* Accept one or more multicasts */
Jiri Pirko22bedad32010-04-01 21:22:57 +00006672 struct netdev_hw_addr *ha;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006673 u32 mc_filter[MC_HASH_SIZE];
6674 u32 crc, bit, regidx;
6675 int i;
6676
6677 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6678
Jiri Pirko22bedad32010-04-01 21:22:57 +00006679 netdev_for_each_mc_addr(ha, dev) {
Johannes Berg7c510e42008-10-27 17:47:26 -07006680 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
Jiri Pirko22bedad32010-04-01 21:22:57 +00006681 ha->addr);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006682
Jiri Pirko22bedad32010-04-01 21:22:57 +00006683 crc = crc32c_le(0, ha->addr, ETH_ALEN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006684 bit = (crc >> 24) & 0xff;
6685 regidx = bit >> 5;
6686 bit &= 0x1f;
6687 mc_filter[regidx] |= (1 << bit);
6688 }
6689
6690 for (i = 0; i < MC_HASH_SIZE; i++)
6691 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6692 mc_filter[i]);
6693 }
6694 }
6695
6696 bp->rx_mode = rx_mode;
6697 bnx2x_set_storm_rx_mode(bp);
6698}
6699
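/*
 * Worked example (comment only, made-up CRC value) of the E1H multicast
 * hash mapping used in bnx2x_set_rx_mode() above:
 *
 *   crc32c_le(0, addr, ETH_ALEN) = 0x9a......  (only the top byte matters)
 *   bit    = (crc >> 24) & 0xff  = 0x9a = 154
 *   regidx = 154 >> 5            = 4
 *   bit   &= 0x1f                = 26
 *
 * so this address sets bit 26 of mc_filter[4], i.e. one bit in a hash
 * table of MC_HASH_SIZE 32-bit registers.  On E1 the same function
 * programs explicit CAM entries instead of a hash.
 */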
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006700
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006701/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006702static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6703 int devad, u16 addr)
6704{
6705 struct bnx2x *bp = netdev_priv(netdev);
6706 u16 value;
6707 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006708
6709 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6710 prtad, devad, addr);
6711
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006712 /* The HW expects different devad if CL22 is used */
6713 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6714
6715 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00006716 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006717 bnx2x_release_phy_lock(bp);
6718 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6719
6720 if (!rc)
6721 rc = value;
6722 return rc;
6723}
6724
6725/* called with rtnl_lock */
6726static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6727 u16 addr, u16 value)
6728{
6729 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006730 int rc;
6731
6732 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6733 " value 0x%x\n", prtad, devad, addr, value);
6734
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006735 /* The HW expects different devad if CL22 is used */
6736 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6737
6738 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00006739 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006740 bnx2x_release_phy_lock(bp);
6741 return rc;
6742}
6743
6744/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6746{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006747 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006748 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006749
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006750 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6751 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006752
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006753 if (!netif_running(dev))
6754 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006755
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006756 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006757}
6758
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00006759#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006760static void poll_bnx2x(struct net_device *dev)
6761{
6762 struct bnx2x *bp = netdev_priv(dev);
6763
6764 disable_irq(bp->pdev->irq);
6765 bnx2x_interrupt(bp->pdev->irq, dev);
6766 enable_irq(bp->pdev->irq);
6767}
6768#endif
6769
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08006770static const struct net_device_ops bnx2x_netdev_ops = {
6771 .ndo_open = bnx2x_open,
6772 .ndo_stop = bnx2x_close,
6773 .ndo_start_xmit = bnx2x_start_xmit,
Eilon Greenstein356e2382009-02-12 08:38:32 +00006774 .ndo_set_multicast_list = bnx2x_set_rx_mode,
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08006775 .ndo_set_mac_address = bnx2x_change_mac_addr,
6776 .ndo_validate_addr = eth_validate_addr,
6777 .ndo_do_ioctl = bnx2x_ioctl,
6778 .ndo_change_mtu = bnx2x_change_mtu,
6779 .ndo_tx_timeout = bnx2x_tx_timeout,
6780#ifdef BCM_VLAN
6781 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6782#endif
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00006783#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08006784 .ndo_poll_controller = poll_bnx2x,
6785#endif
6786};
6787
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006788static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6789 struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006790{
6791 struct bnx2x *bp;
6792 int rc;
6793
6794 SET_NETDEV_DEV(dev, &pdev->dev);
6795 bp = netdev_priv(dev);
6796
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006797 bp->dev = dev;
6798 bp->pdev = pdev;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006799 bp->flags = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006800 bp->func = PCI_FUNC(pdev->devfn);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006801
6802 rc = pci_enable_device(pdev);
6803 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006804 dev_err(&bp->pdev->dev,
6805 "Cannot enable PCI device, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006806 goto err_out;
6807 }
6808
6809 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006810 dev_err(&bp->pdev->dev,
6811 "Cannot find PCI device base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006812 rc = -ENODEV;
6813 goto err_out_disable;
6814 }
6815
6816 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006817 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6818 " base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006819 rc = -ENODEV;
6820 goto err_out_disable;
6821 }
6822
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006823 if (atomic_read(&pdev->enable_cnt) == 1) {
6824 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6825 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006826 dev_err(&bp->pdev->dev,
6827 "Cannot obtain PCI resources, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006828 goto err_out_disable;
6829 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006830
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006831 pci_set_master(pdev);
6832 pci_save_state(pdev);
6833 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006834
6835 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6836 if (bp->pm_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006837 dev_err(&bp->pdev->dev,
6838 "Cannot find power management capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006839 rc = -EIO;
6840 goto err_out_release;
6841 }
6842
6843 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6844 if (bp->pcie_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006845 dev_err(&bp->pdev->dev,
6846 "Cannot find PCI Express capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006847 rc = -EIO;
6848 goto err_out_release;
6849 }
6850
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006851 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006852 bp->flags |= USING_DAC_FLAG;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006853 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006854 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6855 " failed, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006856 rc = -EIO;
6857 goto err_out_release;
6858 }
6859
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006860 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006861 dev_err(&bp->pdev->dev,
6862 "System does not support DMA, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006863 rc = -EIO;
6864 goto err_out_release;
6865 }
6866
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006867 dev->mem_start = pci_resource_start(pdev, 0);
6868 dev->base_addr = dev->mem_start;
6869 dev->mem_end = pci_resource_end(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006870
6871 dev->irq = pdev->irq;
6872
Arjan van de Ven275f1652008-10-20 21:42:39 -07006873 bp->regview = pci_ioremap_bar(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006874 if (!bp->regview) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006875 dev_err(&bp->pdev->dev,
6876 "Cannot map register space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006877 rc = -ENOMEM;
6878 goto err_out_release;
6879 }
6880
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006881 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
6882 min_t(u64, BNX2X_DB_SIZE,
6883 pci_resource_len(pdev, 2)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006884 if (!bp->doorbells) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006885 dev_err(&bp->pdev->dev,
6886 "Cannot map doorbell space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006887 rc = -ENOMEM;
6888 goto err_out_unmap;
6889 }
6890
6891 bnx2x_set_power_state(bp, PCI_D0);
6892
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006893 /* clean indirect addresses */
6894 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
6895 PCICFG_VENDOR_ID_OFFSET);
6896 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
6897 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
6898 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
6899 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006900
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006901 /* Reset the load counter */
6902 bnx2x_clear_load_cnt(bp);
6903
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006904 dev->watchdog_timeo = TX_TIMEOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08006906 dev->netdev_ops = &bnx2x_netdev_ops;
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00006907 bnx2x_set_ethtool_ops(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006908 dev->features |= NETIF_F_SG;
6909 dev->features |= NETIF_F_HW_CSUM;
6910 if (bp->flags & USING_DAC_FLAG)
6911 dev->features |= NETIF_F_HIGHDMA;
Eilon Greenstein5316bc02009-07-21 05:47:43 +00006912 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6913 dev->features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006914#ifdef BCM_VLAN
6915 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08006916 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
Eilon Greenstein5316bc02009-07-21 05:47:43 +00006917
6918 dev->vlan_features |= NETIF_F_SG;
6919 dev->vlan_features |= NETIF_F_HW_CSUM;
6920 if (bp->flags & USING_DAC_FLAG)
6921 dev->vlan_features |= NETIF_F_HIGHDMA;
6922 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6923 dev->vlan_features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006924#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006925
Eilon Greenstein01cd4522009-08-12 08:23:08 +00006926 /* get_port_hwinfo() will set prtad and mmds properly */
6927 bp->mdio.prtad = MDIO_PRTAD_NONE;
6928 bp->mdio.mmds = 0;
6929 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6930 bp->mdio.dev = dev;
6931 bp->mdio.mdio_read = bnx2x_mdio_read;
6932 bp->mdio.mdio_write = bnx2x_mdio_write;
6933
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006934 return 0;
6935
6936err_out_unmap:
6937 if (bp->regview) {
6938 iounmap(bp->regview);
6939 bp->regview = NULL;
6940 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006941 if (bp->doorbells) {
6942 iounmap(bp->doorbells);
6943 bp->doorbells = NULL;
6944 }
6945
6946err_out_release:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006947 if (atomic_read(&pdev->enable_cnt) == 1)
6948 pci_release_regions(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006949
6950err_out_disable:
6951 pci_disable_device(pdev);
6952 pci_set_drvdata(pdev, NULL);
6953
6954err_out:
6955 return rc;
6956}
6957
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00006958static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
6959 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08006960{
6961 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
6962
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00006963 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
6964
6965 /* return value of 1=2.5GHz 2=5GHz */
6966 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08006967}
6968
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00006969static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006970{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00006971 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006972 struct bnx2x_fw_file_hdr *fw_hdr;
6973 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006974 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00006975 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006976 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00006977 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006978
6979 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
6980 return -EINVAL;
6981
6982 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
6983 sections = (struct bnx2x_fw_file_section *)fw_hdr;
6984
6985 /* Make sure none of the offsets and sizes make us read beyond
6986 * the end of the firmware data */
6987 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
6988 offset = be32_to_cpu(sections[i].offset);
6989 len = be32_to_cpu(sections[i].len);
6990 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006991 dev_err(&bp->pdev->dev,
6992 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006993 return -EINVAL;
6994 }
6995 }
6996
6997 /* Likewise for the init_ops offsets */
6998 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
6999 ops_offsets = (u16 *)(firmware->data + offset);
7000 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7001
7002 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7003 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007004 dev_err(&bp->pdev->dev,
7005 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007006 return -EINVAL;
7007 }
7008 }
7009
7010 /* Check FW version */
7011 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7012 fw_ver = firmware->data + offset;
7013 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7014 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7015 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7016 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007017 dev_err(&bp->pdev->dev,
7018 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007019 fw_ver[0], fw_ver[1], fw_ver[2],
7020 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7021 BCM_5710_FW_MINOR_VERSION,
7022 BCM_5710_FW_REVISION_VERSION,
7023 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007024 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007025 }
7026
7027 return 0;
7028}
7029
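/*
 * Note on the checks above: the firmware file begins with a
 * struct bnx2x_fw_file_hdr, which is treated as a plain array of
 * section descriptors, each holding a big-endian offset and length
 * into the rest of the file (init_data, init_ops, init_ops_offsets,
 * fw_version, the per-STORM int tables and pram blobs, ...).
 * bnx2x_check_firmware() only verifies that every descriptor stays
 * inside firmware->size, that each init_ops offset indexes a valid
 * raw_op, and that the embedded version matches the version this
 * driver was built against.
 */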
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007030static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007031{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007032 const __be32 *source = (const __be32 *)_source;
7033 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007034 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007035
7036 for (i = 0; i < n/4; i++)
7037 target[i] = be32_to_cpu(source[i]);
7038}
7039
7040/*
7041 Ops array is stored in the following format:
7042 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7043 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007044static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007045{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007046 const __be32 *source = (const __be32 *)_source;
7047 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007048 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007049
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007050 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007051 tmp = be32_to_cpu(source[j]);
7052 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007053 target[i].offset = tmp & 0xffffff;
7054 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007055 }
7056}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007057
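/*
 * Illustrative decode (comment only, made-up values) of one 8-byte
 * record handled by bnx2x_prep_ops() above:
 *
 *   be32_to_cpu(source[j])     = 0x02123456
 *   be32_to_cpu(source[j + 1]) = 0xcafef00d
 *
 *   target[i].op       = 0x02       - top 8 bits of the first word
 *   target[i].offset   = 0x123456   - low 24 bits of the first word
 *   target[i].raw_data = 0xcafef00d - second word in CPU byte order
 */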
7058static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007059{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007060 const __be16 *source = (const __be16 *)_source;
7061 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007062 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007063
7064 for (i = 0; i < n/2; i++)
7065 target[i] = be16_to_cpu(source[i]);
7066}
7067
Joe Perches7995c642010-02-17 15:01:52 +00007068#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7069do { \
7070 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7071 bp->arr = kmalloc(len, GFP_KERNEL); \
7072 if (!bp->arr) { \
7073 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7074 goto lbl; \
7075 } \
7076 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7077 (u8 *)bp->arr, len); \
7078} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007079
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007080int bnx2x_init_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007081{
Ben Hutchings45229b42009-11-07 11:53:39 +00007082 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007083 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +00007084 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007085
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007086 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00007087 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007088 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00007089 fw_file_name = FW_FILE_NAME_E1H;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007090 else {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007091 BNX2X_ERR("Unsupported chip revision\n");
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007092 return -EINVAL;
7093 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007094
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007095 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007096
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007097 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007098 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007099 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007100 goto request_firmware_exit;
7101 }
7102
7103 rc = bnx2x_check_firmware(bp);
7104 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007105 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007106 goto request_firmware_exit;
7107 }
7108
7109 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7110
7111 /* Initialize the pointers to the init arrays */
7112 /* Blob */
7113 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7114
7115 /* Opcodes */
7116 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7117
7118 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007119 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7120 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007121
7122 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +00007123 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7124 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7125 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7126 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7127 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7128 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7129 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7130 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7131 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7132 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7133 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7134 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7135 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7136 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7137 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7138 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007139
7140 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007141
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007142init_offsets_alloc_err:
7143 kfree(bp->init_ops);
7144init_ops_alloc_err:
7145 kfree(bp->init_data);
7146request_firmware_exit:
7147 release_firmware(bp->firmware);
7148
7149 return rc;
7150}
7151
7152
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007153static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7154 const struct pci_device_id *ent)
7155{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007156 struct net_device *dev = NULL;
7157 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007158 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -08007159 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007160
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007161 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007162 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007163 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007164 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007165 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007166 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007167
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007168 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00007169 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007170
Eilon Greensteindf4770de2009-08-12 08:23:28 +00007171 pci_set_drvdata(pdev, dev);
7172
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007173 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007174 if (rc < 0) {
7175 free_netdev(dev);
7176 return rc;
7177 }
7178
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007179 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00007180 if (rc)
7181 goto init_one_exit;
7182
7183 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007184 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00007185 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007186 goto init_one_exit;
7187 }
7188
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007189 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007190 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7191 " IRQ %d, ", board_info[ent->driver_data].name,
7192 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7193 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7194 dev->base_addr, bp->pdev->irq);
7195 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00007196
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007197 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007198
7199init_one_exit:
7200 if (bp->regview)
7201 iounmap(bp->regview);
7202
7203 if (bp->doorbells)
7204 iounmap(bp->doorbells);
7205
7206 free_netdev(dev);
7207
7208 if (atomic_read(&pdev->enable_cnt) == 1)
7209 pci_release_regions(pdev);
7210
7211 pci_disable_device(pdev);
7212 pci_set_drvdata(pdev, NULL);
7213
7214 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007215}
7216
7217static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7218{
7219 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08007220 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007221
Eliezer Tamir228241e2008-02-28 11:56:57 -08007222 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007223 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08007224 return;
7225 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007226 bp = netdev_priv(dev);
7227
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007228 unregister_netdev(dev);
7229
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007230 /* Make sure RESET task is not scheduled before continuing */
7231 cancel_delayed_work_sync(&bp->reset_task);
7232
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007233 if (bp->regview)
7234 iounmap(bp->regview);
7235
7236 if (bp->doorbells)
7237 iounmap(bp->doorbells);
7238
7239 free_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007240
7241 if (atomic_read(&pdev->enable_cnt) == 1)
7242 pci_release_regions(pdev);
7243
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007244 pci_disable_device(pdev);
7245 pci_set_drvdata(pdev, NULL);
7246}
7247
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007248static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7249{
7250 int i;
7251
7252 bp->state = BNX2X_STATE_ERROR;
7253
7254 bp->rx_mode = BNX2X_RX_MODE_NONE;
7255
7256 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07007257 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007258
7259 del_timer_sync(&bp->timer);
7260 bp->stats_state = STATS_STATE_DISABLED;
7261 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7262
7263 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007264 bnx2x_free_irq(bp, false);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007265
7266 if (CHIP_IS_E1(bp)) {
7267 struct mac_configuration_cmd *config =
7268 bnx2x_sp(bp, mcast_config);
7269
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007270 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007271 CAM_INVALIDATE(config->config_table[i]);
7272 }
7273
7274 /* Free SKBs, SGEs, TPA pool and driver internals */
7275 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007276 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007277 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007278 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007279 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007280 bnx2x_free_mem(bp);
7281
7282 bp->state = BNX2X_STATE_CLOSED;
7283
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007284 return 0;
7285}
7286
7287static void bnx2x_eeh_recover(struct bnx2x *bp)
7288{
7289 u32 val;
7290
7291 mutex_init(&bp->port.phy_mutex);
7292
7293 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7294 bp->link_params.shmem_base = bp->common.shmem_base;
7295 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7296
7297 if (!bp->common.shmem_base ||
7298 (bp->common.shmem_base < 0xA0000) ||
7299 (bp->common.shmem_base >= 0xC0000)) {
7300 BNX2X_DEV_INFO("MCP not active\n");
7301 bp->flags |= NO_MCP_FLAG;
7302 return;
7303 }
7304
7305 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7306 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7307 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7308 BNX2X_ERR("BAD MCP validity signature\n");
7309
7310 if (!BP_NOMCP(bp)) {
7311 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7312 & DRV_MSG_SEQ_NUMBER_MASK);
7313 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7314 }
7315}
7316
Wendy Xiong493adb12008-06-23 20:36:22 -07007317/**
7318 * bnx2x_io_error_detected - called when PCI error is detected
7319 * @pdev: Pointer to PCI device
7320 * @state: The current pci connection state
7321 *
7322 * This function is called after a PCI bus error affecting
7323 * this device has been detected.
7324 */
7325static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7326 pci_channel_state_t state)
7327{
7328 struct net_device *dev = pci_get_drvdata(pdev);
7329 struct bnx2x *bp = netdev_priv(dev);
7330
7331 rtnl_lock();
7332
7333 netif_device_detach(dev);
7334
Dean Nelson07ce50e2009-07-31 09:13:25 +00007335 if (state == pci_channel_io_perm_failure) {
7336 rtnl_unlock();
7337 return PCI_ERS_RESULT_DISCONNECT;
7338 }
7339
Wendy Xiong493adb12008-06-23 20:36:22 -07007340 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007341 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07007342
7343 pci_disable_device(pdev);
7344
7345 rtnl_unlock();
7346
7347 /* Request a slot reset */
7348 return PCI_ERS_RESULT_NEED_RESET;
7349}
7350
7351/**
7352 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7353 * @pdev: Pointer to PCI device
7354 *
7355 * Restart the card from scratch, as if from a cold-boot.
7356 */
7357static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7358{
7359 struct net_device *dev = pci_get_drvdata(pdev);
7360 struct bnx2x *bp = netdev_priv(dev);
7361
7362 rtnl_lock();
7363
7364 if (pci_enable_device(pdev)) {
7365 dev_err(&pdev->dev,
7366 "Cannot re-enable PCI device after reset\n");
7367 rtnl_unlock();
7368 return PCI_ERS_RESULT_DISCONNECT;
7369 }
7370
7371 pci_set_master(pdev);
7372 pci_restore_state(pdev);
7373
7374 if (netif_running(dev))
7375 bnx2x_set_power_state(bp, PCI_D0);
7376
7377 rtnl_unlock();
7378
7379 return PCI_ERS_RESULT_RECOVERED;
7380}
7381
7382/**
7383 * bnx2x_io_resume - called when traffic can start flowing again
7384 * @pdev: Pointer to PCI device
7385 *
7386 * This callback is called when the error recovery driver tells us that
7387 * its OK to resume normal operation.
7388 */
7389static void bnx2x_io_resume(struct pci_dev *pdev)
7390{
7391 struct net_device *dev = pci_get_drvdata(pdev);
7392 struct bnx2x *bp = netdev_priv(dev);
7393
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007394 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7395 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7396 return;
7397 }
7398
Wendy Xiong493adb12008-06-23 20:36:22 -07007399 rtnl_lock();
7400
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007401 bnx2x_eeh_recover(bp);
7402
Wendy Xiong493adb12008-06-23 20:36:22 -07007403 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007404 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07007405
7406 netif_device_attach(dev);
7407
7408 rtnl_unlock();
7409}
7410
7411static struct pci_error_handlers bnx2x_err_handler = {
7412 .error_detected = bnx2x_io_error_detected,
Eilon Greenstein356e2382009-02-12 08:38:32 +00007413 .slot_reset = bnx2x_io_slot_reset,
7414 .resume = bnx2x_io_resume,
Wendy Xiong493adb12008-06-23 20:36:22 -07007415};
7416
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007417static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07007418 .name = DRV_MODULE_NAME,
7419 .id_table = bnx2x_pci_tbl,
7420 .probe = bnx2x_init_one,
7421 .remove = __devexit_p(bnx2x_remove_one),
7422 .suspend = bnx2x_suspend,
7423 .resume = bnx2x_resume,
7424 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007425};
7426
7427static int __init bnx2x_init(void)
7428{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00007429 int ret;
7430
Joe Perches7995c642010-02-17 15:01:52 +00007431 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00007432
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08007433 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7434 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00007435 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08007436 return -ENOMEM;
7437 }
7438
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00007439 ret = pci_register_driver(&bnx2x_pci_driver);
7440 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00007441 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00007442 destroy_workqueue(bnx2x_wq);
7443 }
7444 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007445}
7446
7447static void __exit bnx2x_cleanup(void)
7448{
7449 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08007450
7451 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007452}
7453
7454module_init(bnx2x_init);
7455module_exit(bnx2x_cleanup);
7456
Michael Chan993ac7b2009-10-10 13:46:56 +00007457#ifdef BCM_CNIC
7458
7459/* count denotes the number of new completions we have seen */
7460static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7461{
7462 struct eth_spe *spe;
7463
7464#ifdef BNX2X_STOP_ON_ERROR
7465 if (unlikely(bp->panic))
7466 return;
7467#endif
7468
7469 spin_lock_bh(&bp->spq_lock);
7470 bp->cnic_spq_pending -= count;
7471
7472 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7473 bp->cnic_spq_pending++) {
7474
7475 if (!bp->cnic_kwq_pending)
7476 break;
7477
7478 spe = bnx2x_sp_get_next(bp);
7479 *spe = *bp->cnic_kwq_cons;
7480
7481 bp->cnic_kwq_pending--;
7482
7483 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7484 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7485
7486 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7487 bp->cnic_kwq_cons = bp->cnic_kwq;
7488 else
7489 bp->cnic_kwq_cons++;
7490 }
7491 bnx2x_sp_prod_update(bp);
7492 spin_unlock_bh(&bp->spq_lock);
7493}
7494
7495static int bnx2x_cnic_sp_queue(struct net_device *dev,
7496 struct kwqe_16 *kwqes[], u32 count)
7497{
7498 struct bnx2x *bp = netdev_priv(dev);
7499 int i;
7500
7501#ifdef BNX2X_STOP_ON_ERROR
7502 if (unlikely(bp->panic))
7503 return -EIO;
7504#endif
7505
7506 spin_lock_bh(&bp->spq_lock);
7507
7508 for (i = 0; i < count; i++) {
7509 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7510
7511 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7512 break;
7513
7514 *bp->cnic_kwq_prod = *spe;
7515
7516 bp->cnic_kwq_pending++;
7517
7518 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7519 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7520 spe->data.mac_config_addr.hi,
7521 spe->data.mac_config_addr.lo,
7522 bp->cnic_kwq_pending);
7523
7524 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7525 bp->cnic_kwq_prod = bp->cnic_kwq;
7526 else
7527 bp->cnic_kwq_prod++;
7528 }
7529
7530 spin_unlock_bh(&bp->spq_lock);
7531
7532 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7533 bnx2x_cnic_sp_post(bp, 0);
7534
7535 return i;
7536}
7537
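/*
 * Note on the two functions above: bp->cnic_kwq is a single page of
 * struct eth_spe entries used as a circular buffer between the CNIC
 * driver and the slowpath queue.  bnx2x_cnic_sp_queue() is the producer
 * (cnic_kwq_prod, bounded by MAX_SP_DESC_CNT pending entries) and
 * bnx2x_cnic_sp_post() is the consumer, draining entries onto the real
 * SPQ as completions free up room; both pointers wrap at cnic_kwq_last
 * back to cnic_kwq.
 */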
7538static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7539{
7540 struct cnic_ops *c_ops;
7541 int rc = 0;
7542
7543 mutex_lock(&bp->cnic_mutex);
7544 c_ops = bp->cnic_ops;
7545 if (c_ops)
7546 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7547 mutex_unlock(&bp->cnic_mutex);
7548
7549 return rc;
7550}
7551
7552static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7553{
7554 struct cnic_ops *c_ops;
7555 int rc = 0;
7556
7557 rcu_read_lock();
7558 c_ops = rcu_dereference(bp->cnic_ops);
7559 if (c_ops)
7560 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7561 rcu_read_unlock();
7562
7563 return rc;
7564}
7565
7566/*
7567 * for commands that have no data
7568 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007569int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00007570{
7571 struct cnic_ctl_info ctl = {0};
7572
7573 ctl.cmd = cmd;
7574
7575 return bnx2x_cnic_ctl_send(bp, &ctl);
7576}
7577
7578static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7579{
7580 struct cnic_ctl_info ctl;
7581
7582 /* first we tell CNIC and only then we count this as a completion */
7583 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7584 ctl.data.comp.cid = cid;
7585
7586 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7587 bnx2x_cnic_sp_post(bp, 1);
7588}
7589
7590static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7591{
7592 struct bnx2x *bp = netdev_priv(dev);
7593 int rc = 0;
7594
7595 switch (ctl->cmd) {
7596 case DRV_CTL_CTXTBL_WR_CMD: {
7597 u32 index = ctl->data.io.offset;
7598 dma_addr_t addr = ctl->data.io.dma_addr;
7599
7600 bnx2x_ilt_wr(bp, index, addr);
7601 break;
7602 }
7603
7604 case DRV_CTL_COMPLETION_CMD: {
7605 int count = ctl->data.comp.comp_count;
7606
7607 bnx2x_cnic_sp_post(bp, count);
7608 break;
7609 }
7610
7611 /* rtnl_lock is held. */
7612 case DRV_CTL_START_L2_CMD: {
7613 u32 cli = ctl->data.ring.client_id;
7614
7615 bp->rx_mode_cl_mask |= (1 << cli);
7616 bnx2x_set_storm_rx_mode(bp);
7617 break;
7618 }
7619
7620 /* rtnl_lock is held. */
7621 case DRV_CTL_STOP_L2_CMD: {
7622 u32 cli = ctl->data.ring.client_id;
7623
7624 bp->rx_mode_cl_mask &= ~(1 << cli);
7625 bnx2x_set_storm_rx_mode(bp);
7626 break;
7627 }
7628
7629 default:
7630 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7631 rc = -EINVAL;
7632 }
7633
7634 return rc;
7635}
7636
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007637void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +00007638{
7639 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7640
7641 if (bp->flags & USING_MSIX_FLAG) {
7642 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7643 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7644 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7645 } else {
7646 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7647 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7648 }
7649 cp->irq_arr[0].status_blk = bp->cnic_sb;
7650 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7651 cp->irq_arr[1].status_blk = bp->def_status_blk;
7652 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7653
7654 cp->num_irq = 2;
7655}
7656
7657static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7658 void *data)
7659{
7660 struct bnx2x *bp = netdev_priv(dev);
7661 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7662
7663 if (ops == NULL)
7664 return -EINVAL;
7665
7666 if (atomic_read(&bp->intr_sem) != 0)
7667 return -EBUSY;
7668
7669 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7670 if (!bp->cnic_kwq)
7671 return -ENOMEM;
7672
7673 bp->cnic_kwq_cons = bp->cnic_kwq;
7674 bp->cnic_kwq_prod = bp->cnic_kwq;
7675 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7676
7677 bp->cnic_spq_pending = 0;
7678 bp->cnic_kwq_pending = 0;
7679
7680 bp->cnic_data = data;
7681
7682 cp->num_irq = 0;
7683 cp->drv_state = CNIC_DRV_STATE_REGD;
7684
7685 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7686
7687 bnx2x_setup_cnic_irq_info(bp);
7688 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7689 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7690 rcu_assign_pointer(bp->cnic_ops, ops);
7691
7692 return 0;
7693}
7694
7695static int bnx2x_unregister_cnic(struct net_device *dev)
7696{
7697 struct bnx2x *bp = netdev_priv(dev);
7698 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7699
7700 mutex_lock(&bp->cnic_mutex);
7701 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7702 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7703 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7704 }
7705 cp->drv_state = 0;
7706 rcu_assign_pointer(bp->cnic_ops, NULL);
7707 mutex_unlock(&bp->cnic_mutex);
7708 synchronize_rcu();
7709 kfree(bp->cnic_kwq);
7710 bp->cnic_kwq = NULL;
7711
7712 return 0;
7713}
7714
7715struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7716{
7717 struct bnx2x *bp = netdev_priv(dev);
7718 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7719
7720 cp->drv_owner = THIS_MODULE;
7721 cp->chip_id = CHIP_ID(bp);
7722 cp->pdev = bp->pdev;
7723 cp->io_base = bp->regview;
7724 cp->io_base2 = bp->doorbells;
7725 cp->max_kwqe_pending = 8;
7726 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
7727 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
7728 cp->ctx_tbl_len = CNIC_ILT_LINES;
7729 cp->starting_cid = BCM_CNIC_CID_START;
7730 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
7731 cp->drv_ctl = bnx2x_drv_ctl;
7732 cp->drv_register_cnic = bnx2x_register_cnic;
7733 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
7734
7735 return cp;
7736}
7737EXPORT_SYMBOL(bnx2x_cnic_probe);
7738
7739#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007740