Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001/* bnx2x.c: Broadcom Everest network driver.
2 *
Eliezer Tamirf1410642008-02-28 11:51:50 -08003 * Copyright (c) 2007-2008 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Eliezer Tamir <eliezert@broadcom.com>
10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080013 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020014 *
15 */
16
17/* define this to make the driver freeze on error
18 * to allow getting debug info
Eliezer Tamirc14423f2008-02-28 11:49:42 -080019 * (you will need to reboot afterwards)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020020 */
21/*#define BNX2X_STOP_ON_ERROR*/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/kernel.h>
26#include <linux/device.h> /* for dev_info() */
27#include <linux/timer.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/interrupt.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/dma-mapping.h>
39#include <linux/bitops.h>
40#include <linux/irq.h>
41#include <linux/delay.h>
42#include <asm/byteorder.h>
43#include <linux/time.h>
44#include <linux/ethtool.h>
45#include <linux/mii.h>
46#ifdef NETIF_F_HW_VLAN_TX
47 #include <linux/if_vlan.h>
48 #define BCM_VLAN 1
49#endif
50#include <net/ip.h>
51#include <net/tcp.h>
52#include <net/checksum.h>
53#include <linux/workqueue.h>
54#include <linux/crc32.h>
55#include <linux/prefetch.h>
56#include <linux/zlib.h>
57#include <linux/version.h>
58#include <linux/io.h>
59
60#include "bnx2x_reg.h"
61#include "bnx2x_fw_defs.h"
62#include "bnx2x_hsi.h"
63#include "bnx2x.h"
64#include "bnx2x_init.h"
65
Eliezer Tamir8d84a442008-02-28 11:58:49 -080066#define DRV_MODULE_VERSION "1.40.22"
67#define DRV_MODULE_RELDATE "2007/11/27"
Eliezer Tamirf1410642008-02-28 11:51:50 -080068#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020069
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ)
72
Andrew Morton53a10562008-02-09 23:16:41 -080073static char version[] __devinitdata =
Eliezer Tamirc14423f2008-02-28 11:49:42 -080074 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020075 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081
82static int use_inta;
83static int poll;
84static int onefunc;
85static int nomcp;
86static int debug;
87static int use_multi;
88
89module_param(use_inta, int, 0);
90module_param(poll, int, 0);
91module_param(onefunc, int, 0);
92module_param(debug, int, 0);
93MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
94MODULE_PARM_DESC(poll, "use polling (for debug)");
95MODULE_PARM_DESC(onefunc, "enable only first function");
Eliezer Tamirc14423f2008-02-28 11:49:42 -080096MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
97MODULE_PARM_DESC(debug, "default debug msglevel");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020098
99#ifdef BNX2X_MULTI
100module_param(use_multi, int, 0);
101MODULE_PARM_DESC(use_multi, "use per-CPU queues");
102#endif
103
104enum bnx2x_board_type {
105 BCM57710 = 0,
106};
107
108/* indexed by board_t, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800109static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200110 char *name;
111} board_info[] __devinitdata = {
112 { "Broadcom NetXtreme II BCM57710 XGb" }
113};
114
115static const struct pci_device_id bnx2x_pci_tbl[] = {
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
118 { 0 }
119};
120
121MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
122
123/****************************************************************************
124* General service functions
125****************************************************************************/
126
127/* used only at init
128 * locking is done by mcp
129 */
130static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
131{
132 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
133 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
135 PCICFG_VENDOR_ID_OFFSET);
136}
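/* Indirect GRC access sketch: the target register address goes through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA config-space window, and the window is
 * pointed back at PCICFG_VENDOR_ID_OFFSET afterwards, presumably so that an
 * unrelated config access cannot land on a device register by accident.
 */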
137
138#ifdef BNX2X_IND_RD
139static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
140{
141 u32 val;
142
143 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
144 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
146 PCICFG_VENDOR_ID_OFFSET);
147
148 return val;
149}
150#endif
151
152static const u32 dmae_reg_go_c[] = {
153 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
154 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
155 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
156 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
157};
158
159/* copy command into DMAE command memory and set DMAE command go */
160static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
161 int idx)
162{
163 u32 cmd_offset;
164 int i;
165
166 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
167 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
168 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
169
170/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
171 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
172 }
173 REG_WR(bp, dmae_reg_go_c[idx], 1);
174}
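/* The command block is copied one dword at a time into the DMAE command
 * memory slot for channel 'idx', and the engine is then kicked by writing 1
 * to the matching GO register from dmae_reg_go_c[].  The callers below use
 * channel (port * 8), i.e. channel 0 or 8 depending on the port.
 */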
175
176static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
177 u32 dst_addr, u32 len32)
178{
179 struct dmae_command *dmae = &bp->dmae;
180 int port = bp->port;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182 int timeout = 200;
183
184 memset(dmae, 0, sizeof(struct dmae_command));
185
186 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
187 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
188 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
189#ifdef __BIG_ENDIAN
190 DMAE_CMD_ENDIANITY_B_DW_SWAP |
191#else
192 DMAE_CMD_ENDIANITY_DW_SWAP |
193#endif
194 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
195 dmae->src_addr_lo = U64_LO(dma_addr);
196 dmae->src_addr_hi = U64_HI(dma_addr);
197 dmae->dst_addr_lo = dst_addr >> 2;
198 dmae->dst_addr_hi = 0;
199 dmae->len = len32;
200 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
201 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
202 dmae->comp_val = BNX2X_WB_COMP_VAL;
203
204/*
205 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
206 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
207 "dst_addr [%x:%08x (%08x)]\n"
208 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
209 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
210 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
211 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
212*/
213/*
214 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
215 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
216 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
217*/
218
219 *wb_comp = 0;
220
221 bnx2x_post_dmae(bp, dmae, port * 8);
222
223 udelay(5);
224 /* adjust timeout for emulation/FPGA */
225 if (CHIP_REV_IS_SLOW(bp))
226 timeout *= 100;
227 while (*wb_comp != BNX2X_WB_COMP_VAL) {
228/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
229 udelay(5);
230 if (!timeout) {
231 BNX2X_ERR("dmae timeout!\n");
232 break;
233 }
234 timeout--;
235 }
236}
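/* Completion is detected by polling the write-back word: the engine is
 * expected to DMA BNX2X_WB_COMP_VAL into wb_comp (comp_addr/comp_val above)
 * when the transfer finishes.  The poll interval is 5us, and the timeout is
 * stretched 100x on emulation/FPGA where the chip runs far slower than
 * real silicon.
 */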
237
238#ifdef BNX2X_DMAE_RD
239static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
240{
241 struct dmae_command *dmae = &bp->dmae;
242 int port = bp->port;
243 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
244 int timeout = 200;
245
246 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
247 memset(dmae, 0, sizeof(struct dmae_command));
248
249 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
250 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
251 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
252#ifdef __BIG_ENDIAN
253 DMAE_CMD_ENDIANITY_B_DW_SWAP |
254#else
255 DMAE_CMD_ENDIANITY_DW_SWAP |
256#endif
257 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
258 dmae->src_addr_lo = src_addr >> 2;
259 dmae->src_addr_hi = 0;
260 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
261 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
262 dmae->len = len32;
263 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
264 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_val = BNX2X_WB_COMP_VAL;
266
267/*
268 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
269 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
270 "dst_addr [%x:%08x (%08x)]\n"
271 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
272 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
273 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
274 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
275*/
276
277 *wb_comp = 0;
278
279 bnx2x_post_dmae(bp, dmae, port * 8);
280
281 udelay(5);
282 while (*wb_comp != BNX2X_WB_COMP_VAL) {
283 udelay(5);
284 if (!timeout) {
285 BNX2X_ERR("dmae timeout!\n");
286 break;
287 }
288 timeout--;
289 }
290/*
291 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
292 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
293 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
294*/
295}
296#endif
297
298static int bnx2x_mc_assert(struct bnx2x *bp)
299{
Eliezer Tamir49d66772008-02-28 11:53:13 -0800300 int i, j, rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200301 char last_idx;
302 const char storm[] = {"XTCU"};
303 const u32 intmem_base[] = {
304 BAR_XSTRORM_INTMEM,
305 BAR_TSTRORM_INTMEM,
306 BAR_CSTRORM_INTMEM,
307 BAR_USTRORM_INTMEM
308 };
309
310 /* Go through all instances of all SEMIs */
311 for (i = 0; i < 4; i++) {
312 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
313 intmem_base[i]);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800314 if (last_idx)
315 BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
316 storm[i], last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200317
318 /* print the asserts */
319 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
320 u32 row0, row1, row2, row3;
321
322 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
323 intmem_base[i]);
324 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
325 intmem_base[i]);
326 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
327 intmem_base[i]);
328 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
329 intmem_base[i]);
330
331 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
Eliezer Tamir49d66772008-02-28 11:53:13 -0800332 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200333 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
334 storm[i], j, row3, row2, row1, row0);
335 rc++;
336 } else {
337 break;
338 }
339 }
340 }
341 return rc;
342}
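/* Each of the four storm processors (X, T, C, U - see storm[] and
 * intmem_base[]) keeps an assert list in its internal memory.  Entries are
 * four dwords each, the scan stops at the first
 * COMMON_ASM_INVALID_ASSERT_OPCODE marker, and the return value is the
 * total number of asserts found.
 */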
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800343
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200344static void bnx2x_fw_dump(struct bnx2x *bp)
345{
346 u32 mark, offset;
347 u32 data[9];
348 int word;
349
350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800351 mark = ((mark + 0x3) & ~0x3);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200353
354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355 for (word = 0; word < 8; word++)
356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357 offset + 4*word));
358 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800359 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200360 }
361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362 for (word = 0; word < 8; word++)
363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364 offset + 4*word));
365 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800366 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200367 }
368 printk("\n" KERN_ERR PFX "end of fw dump\n");
369}
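/* The word at scratchpad offset 0xf104 marks the current position in what
 * looks like a circular firmware trace buffer; the dump is printed in two
 * halves (mark..0xF900, then 0xF108..mark), presumably so the output comes
 * out in chronological order.  data[8] = 0 just NUL-terminates each
 * 8-dword chunk before it is printed as a string.
 */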
370
371static void bnx2x_panic_dump(struct bnx2x *bp)
372{
373 int i;
374 u16 j, start, end;
375
376 BNX2X_ERR("begin crash dump -----------------\n");
377
378 for_each_queue(bp, i) {
379 struct bnx2x_fastpath *fp = &bp->fp[i];
380 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
381
382 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
383 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
384 " *rx_cons_sb(%x) rx_comp_prod(%x)"
385 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
386 " bd data(%x,%x)\n",
387 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
388 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
389 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
390 fp->fp_u_idx, hw_prods->packets_prod,
391 hw_prods->bds_prod);
392
393 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
394 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
395 for (j = start; j < end; j++) {
396 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
397
398 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
399 sw_bd->skb, sw_bd->first_bd);
400 }
401
402 start = TX_BD(fp->tx_bd_cons - 10);
403 end = TX_BD(fp->tx_bd_cons + 254);
404 for (j = start; j < end; j++) {
405 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
406
407 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
408 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
409 }
410
411 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
412 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
413 for (j = start; j < end; j++) {
414 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
415 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
416
417 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
418 j, rx_bd[0], rx_bd[1], sw_bd->skb);
419 }
420
421 start = RCQ_BD(fp->rx_comp_cons - 10);
422 end = RCQ_BD(fp->rx_comp_cons + 503);
423 for (j = start; j < end; j++) {
424 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
425
426 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
427 j, cqe[0], cqe[1], cqe[2], cqe[3]);
428 }
429 }
430
Eliezer Tamir49d66772008-02-28 11:53:13 -0800431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
432 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200433 " spq_prod_idx(%u)\n",
Eliezer Tamir49d66772008-02-28 11:53:13 -0800434 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
436
437
438 bnx2x_mc_assert(bp);
439 BNX2X_ERR("end crash dump -----------------\n");
440
441 bp->stats_state = STATS_STATE_DISABLE;
442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
443}
444
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800445static void bnx2x_int_enable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200446{
447 int port = bp->port;
448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449 u32 val = REG_RD(bp, addr);
450 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
451
452 if (msix) {
453 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
456 } else {
457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800458 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200459 HC_CONFIG_0_REG_INT_LINE_EN_0 |
460 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800461
462 /* Errata A0.158 workaround */
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
464 val, port, addr, msix);
465
466 REG_WR(bp, addr, val);
467
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200468 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
469 }
470
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800471 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200472 val, port, addr, msix);
473
474 REG_WR(bp, addr, val);
475}
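/* For INTA the HC config is written twice: first with the MSI/MSI-X enable
 * bit still set and then again with it cleared (the errata A0.158
 * workaround noted above).  For MSI-X a single write suffices: SINGLE_ISR
 * is cleared and the MSI/MSI-X and attention enables are set.
 */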
476
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800477static void bnx2x_int_disable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200478{
479 int port = bp->port;
480 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
481 u32 val = REG_RD(bp, addr);
482
483 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
484 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
485 HC_CONFIG_0_REG_INT_LINE_EN_0 |
486 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
487
488 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
489 val, port, addr);
490
491 REG_WR(bp, addr, val);
492 if (REG_RD(bp, addr) != val)
493 BNX2X_ERR("BUG! proper val not read from IGU!\n");
494}
495
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800496static void bnx2x_int_disable_sync(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200497{
498
499 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
500 int i;
501
502 atomic_inc(&bp->intr_sem);
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800503 /* prevent the HW from sending interrupts */
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800504 bnx2x_int_disable(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200505
506 /* make sure all ISRs are done */
507 if (msix) {
508 for_each_queue(bp, i)
509 synchronize_irq(bp->msix_table[i].vector);
510
511 /* one more for the Slow Path IRQ */
512 synchronize_irq(bp->msix_table[i].vector);
513 } else
514 synchronize_irq(bp->pdev->irq);
515
516 /* make sure sp_task is not running */
517 cancel_work_sync(&bp->sp_task);
518
519}
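/* Quiesce sequence: intr_sem makes any ISR that still fires return early,
 * bnx2x_int_disable() masks the source at the HC, synchronize_irq() waits
 * for handlers already in flight (the extra vector after the loop is the
 * slowpath one in MSI-X mode), and cancel_work_sync() makes sure sp_task
 * is not left running.
 */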
520
521/* fast path code */
522
523/*
524 * general service functions
525 */
526
527static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
528 u8 storm, u16 index, u8 op, u8 update)
529{
530 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
531 struct igu_ack_register igu_ack;
532
533 igu_ack.status_block_index = index;
534 igu_ack.sb_id_and_flags =
535 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
536 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
537 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
538 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
539
540/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
541 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
542 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
543}
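/* The ack is a single dword write to the per-port IGU INT_ACK address:
 * status block id, storm id, the "update index" flag and the interrupt
 * enable/disable op are packed into struct igu_ack_register and pushed out
 * as a raw u32.
 */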
544
545static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
546{
547 struct host_status_block *fpsb = fp->status_blk;
548 u16 rc = 0;
549
550 barrier(); /* status block is written to by the chip */
551 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
552 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
553 rc |= 1;
554 }
555 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
556 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
557 rc |= 2;
558 }
559 return rc;
560}
561
562static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
563{
564 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
565
566 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
567 rx_cons_sb++;
568
569 if ((rx_cons_sb != fp->rx_comp_cons) ||
570 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
571 return 1;
572
573 return 0;
574}
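/* When the status-block consumer lands on the last entry of an RCQ page it
 * is bumped by one: that slot holds the next-page pointer rather than a
 * real completion, so the software consumer - which is advanced with
 * NEXT_RCQ_IDX() and therefore skips page boundaries - can be compared
 * against it directly.
 */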
575
576static u16 bnx2x_ack_int(struct bnx2x *bp)
577{
578 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
579 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
580
581/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
582 result, BAR_IGU_INTMEM + igu_addr); */
583
584#ifdef IGU_DEBUG
585#warning IGU_DEBUG active
586 if (result == 0) {
587 BNX2X_ERR("read %x from IGU\n", result);
588 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
589 }
590#endif
591 return result;
592}
593
594
595/*
596 * fast path service functions
597 */
598
599/* free skb in the packet ring at pos idx
600 * return idx of last bd freed
601 */
602static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
603 u16 idx)
604{
605 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
606 struct eth_tx_bd *tx_bd;
607 struct sk_buff *skb = tx_buf->skb;
608 u16 bd_idx = tx_buf->first_bd;
609 int nbd;
610
611 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
612 idx, tx_buf, skb);
613
614 /* unmap first bd */
615 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
616 tx_bd = &fp->tx_desc_ring[bd_idx];
617 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
618 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
619
620 nbd = le16_to_cpu(tx_bd->nbd) - 1;
621#ifdef BNX2X_STOP_ON_ERROR
622 if (nbd > (MAX_SKB_FRAGS + 2)) {
623 BNX2X_ERR("bad nbd!\n");
624 bnx2x_panic();
625 }
626#endif
627
628 /* Skip a parse bd and the TSO split header bd
629 since they have no mapping */
630 if (nbd)
631 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
632
633 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
634 ETH_TX_BD_FLAGS_TCP_CSUM |
635 ETH_TX_BD_FLAGS_SW_LSO)) {
636 if (--nbd)
637 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
638 tx_bd = &fp->tx_desc_ring[bd_idx];
639 /* is this a TSO split header bd? */
640 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
641 if (--nbd)
642 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
643 }
644 }
645
646 /* now free frags */
647 while (nbd > 0) {
648
649 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
650 tx_bd = &fp->tx_desc_ring[bd_idx];
651 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
652 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
653 if (--nbd)
654 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
655 }
656
657 /* release skb */
658 BUG_TRAP(skb);
659 dev_kfree_skb(skb);
660 tx_buf->first_bd = 0;
661 tx_buf->skb = NULL;
662
663 return bd_idx;
664}
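/* BD chain layout handled here: the first BD (unmapped with
 * pci_unmap_single) carries nbd, an optional parse BD and an optional TSO
 * split-header BD follow and are skipped since they carry no mapping, and
 * the remaining frag BDs are unmapped with pci_unmap_page.  The returned
 * index becomes the new tx_bd_cons in bnx2x_tx_int().
 */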
665
666static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
667{
668 u16 used;
669 u32 prod;
670 u32 cons;
671
672 /* Tell compiler that prod and cons can change */
673 barrier();
674 prod = fp->tx_bd_prod;
675 cons = fp->tx_bd_cons;
676
677 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
678 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
679
680 if (prod >= cons) {
681 /* used = prod - cons - prod/size + cons/size */
682 used -= NUM_TX_BD - NUM_TX_RINGS;
683 }
684
685 BUG_TRAP(used <= fp->bp->tx_ring_size);
686 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
687
688 return (fp->bp->tx_ring_size - used);
689}
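/* The ring math accounts for the next-page BD at the end of each of the
 * NUM_TX_RINGS pages: the cons/TX_DESC_CNT - prod/TX_DESC_CNT terms drop
 * the page-boundary descriptors between the two indices, and the
 * NUM_TX_BD - NUM_TX_RINGS constant (removed again when prod >= cons)
 * handles wrap-around.
 */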
690
691static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
692{
693 struct bnx2x *bp = fp->bp;
694 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
695 int done = 0;
696
697#ifdef BNX2X_STOP_ON_ERROR
698 if (unlikely(bp->panic))
699 return;
700#endif
701
702 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
703 sw_cons = fp->tx_pkt_cons;
704
705 while (sw_cons != hw_cons) {
706 u16 pkt_cons;
707
708 pkt_cons = TX_BD(sw_cons);
709
710 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
711
712 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
713 hw_cons, sw_cons, pkt_cons);
714
715/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
716 rmb();
717 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
718 }
719*/
720 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
721 sw_cons++;
722 done++;
723
724 if (done == work)
725 break;
726 }
727
728 fp->tx_pkt_cons = sw_cons;
729 fp->tx_bd_cons = bd_cons;
730
731 /* Need to make the tx_cons update visible to start_xmit()
732 * before checking for netif_queue_stopped(). Without the
733 * memory barrier, there is a small possibility that start_xmit()
734 * will miss it and cause the queue to be stopped forever.
735 */
736 smp_mb();
737
738 /* TBD need a thresh? */
739 if (unlikely(netif_queue_stopped(bp->dev))) {
740
741 netif_tx_lock(bp->dev);
742
743 if (netif_queue_stopped(bp->dev) &&
744 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
745 netif_wake_queue(bp->dev);
746
747 netif_tx_unlock(bp->dev);
748
749 }
750}
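/* The wake-up path deliberately rechecks both conditions under
 * netif_tx_lock(): the smp_mb() above pairs with the stop check in
 * start_xmit(), and MAX_SKB_FRAGS + 3 presumably leaves room for a
 * worst-case packet (every fragment plus the extra parse/LSO BDs) before
 * the queue is restarted.
 */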
751
752static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
753 union eth_rx_cqe *rr_cqe)
754{
755 struct bnx2x *bp = fp->bp;
756 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
757 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
758
759 DP(NETIF_MSG_RX_STATUS,
760 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
761 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
762
763 bp->spq_left++;
764
765 if (fp->index) {
766 switch (command | fp->state) {
767 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
768 BNX2X_FP_STATE_OPENING):
769 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
770 cid);
771 fp->state = BNX2X_FP_STATE_OPEN;
772 break;
773
774 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
775 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
776 cid);
777 fp->state = BNX2X_FP_STATE_HALTED;
778 break;
779
780 default:
781 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
782 command, fp->state);
783 }
784 mb(); /* force bnx2x_wait_ramrod to see the change */
785 return;
786 }
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800787
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200788 switch (command | bp->state) {
789 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
790 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
791 bp->state = BNX2X_STATE_OPEN;
792 break;
793
794 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
795 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
796 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
797 fp->state = BNX2X_FP_STATE_HALTED;
798 break;
799
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200800 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
Eliezer Tamir49d66772008-02-28 11:53:13 -0800801 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
802 cid);
803 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200804 break;
805
806 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
807 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
808 break;
809
Eliezer Tamir49d66772008-02-28 11:53:13 -0800810 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
811 DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
812 break;
813
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200814 default:
815 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
816 command, bp->state);
817 }
818
819 mb(); /* force bnx2x_wait_ramrod to see the change */
820}
821
822static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
823 struct bnx2x_fastpath *fp, u16 index)
824{
825 struct sk_buff *skb;
826 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
827 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
828 dma_addr_t mapping;
829
830 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
831 if (unlikely(skb == NULL))
832 return -ENOMEM;
833
834 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
835 PCI_DMA_FROMDEVICE);
836 if (unlikely(dma_mapping_error(mapping))) {
837
838 dev_kfree_skb(skb);
839 return -ENOMEM;
840 }
841
842 rx_buf->skb = skb;
843 pci_unmap_addr_set(rx_buf, mapping, mapping);
844
845 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
846 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
847
848 return 0;
849}
850
851/* note that we are not allocating a new skb,
852 * we are just moving one from cons to prod
853 * we are not creating a new mapping,
854 * so there is no need to check for dma_mapping_error().
855 */
856static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
857 struct sk_buff *skb, u16 cons, u16 prod)
858{
859 struct bnx2x *bp = fp->bp;
860 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
861 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
862 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
863 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
864
865 pci_dma_sync_single_for_device(bp->pdev,
866 pci_unmap_addr(cons_rx_buf, mapping),
867 bp->rx_offset + RX_COPY_THRESH,
868 PCI_DMA_FROMDEVICE);
869
870 prod_rx_buf->skb = cons_rx_buf->skb;
871 pci_unmap_addr_set(prod_rx_buf, mapping,
872 pci_unmap_addr(cons_rx_buf, mapping));
873 *prod_bd = *cons_bd;
874}
875
876static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
877{
878 struct bnx2x *bp = fp->bp;
879 u16 bd_cons, bd_prod, comp_ring_cons;
880 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
881 int rx_pkt = 0;
882
883#ifdef BNX2X_STOP_ON_ERROR
884 if (unlikely(bp->panic))
885 return 0;
886#endif
887
888 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
889 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
890 hw_comp_cons++;
891
892 bd_cons = fp->rx_bd_cons;
893 bd_prod = fp->rx_bd_prod;
894 sw_comp_cons = fp->rx_comp_cons;
895 sw_comp_prod = fp->rx_comp_prod;
896
897 /* Memory barrier necessary as speculative reads of the rx
898 * buffer can be ahead of the index in the status block
899 */
900 rmb();
901
902 DP(NETIF_MSG_RX_STATUS,
903 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
904 fp->index, hw_comp_cons, sw_comp_cons);
905
906 while (sw_comp_cons != hw_comp_cons) {
907 unsigned int len, pad;
908 struct sw_rx_bd *rx_buf;
909 struct sk_buff *skb;
910 union eth_rx_cqe *cqe;
911
912 comp_ring_cons = RCQ_BD(sw_comp_cons);
913 bd_prod = RX_BD(bd_prod);
914 bd_cons = RX_BD(bd_cons);
915
916 cqe = &fp->rx_comp_ring[comp_ring_cons];
917
918 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
919 " comp_ring (%u) bd_ring (%u,%u)\n",
920 hw_comp_cons, sw_comp_cons,
921 comp_ring_cons, bd_prod, bd_cons);
922 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
923 " queue %x vlan %x len %x\n",
924 cqe->fast_path_cqe.type,
925 cqe->fast_path_cqe.error_type_flags,
926 cqe->fast_path_cqe.status_flags,
927 cqe->fast_path_cqe.rss_hash_result,
928 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
929
930 /* is this a slowpath msg? */
931 if (unlikely(cqe->fast_path_cqe.type)) {
932 bnx2x_sp_event(fp, cqe);
933 goto next_cqe;
934
935 /* this is an rx packet */
936 } else {
937 rx_buf = &fp->rx_buf_ring[bd_cons];
938 skb = rx_buf->skb;
939
940 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
941 pad = cqe->fast_path_cqe.placement_offset;
942
943 pci_dma_sync_single_for_device(bp->pdev,
944 pci_unmap_addr(rx_buf, mapping),
945 pad + RX_COPY_THRESH,
946 PCI_DMA_FROMDEVICE);
947 prefetch(skb);
948 prefetch(((char *)(skb)) + 128);
949
950 /* is this an error packet? */
951 if (unlikely(cqe->fast_path_cqe.error_type_flags &
952 ETH_RX_ERROR_FALGS)) {
953 /* do we sometimes forward error packets anyway? */
954 DP(NETIF_MSG_RX_ERR,
955 "ERROR flags(%u) Rx packet(%u)\n",
956 cqe->fast_path_cqe.error_type_flags,
957 sw_comp_cons);
958 /* TBD make sure MC counts this as a drop */
959 goto reuse_rx;
960 }
961
962 /* Since we don't have a jumbo ring
963 * copy small packets if mtu > 1500
964 */
965 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
966 (len <= RX_COPY_THRESH)) {
967 struct sk_buff *new_skb;
968
969 new_skb = netdev_alloc_skb(bp->dev,
970 len + pad);
971 if (new_skb == NULL) {
972 DP(NETIF_MSG_RX_ERR,
973 "ERROR packet dropped "
974 "because of alloc failure\n");
975 /* TBD count this as a drop? */
976 goto reuse_rx;
977 }
978
979 /* aligned copy */
980 skb_copy_from_linear_data_offset(skb, pad,
981 new_skb->data + pad, len);
982 skb_reserve(new_skb, pad);
983 skb_put(new_skb, len);
984
985 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
986
987 skb = new_skb;
988
989 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
990 pci_unmap_single(bp->pdev,
991 pci_unmap_addr(rx_buf, mapping),
992 bp->rx_buf_use_size,
993 PCI_DMA_FROMDEVICE);
994 skb_reserve(skb, pad);
995 skb_put(skb, len);
996
997 } else {
998 DP(NETIF_MSG_RX_ERR,
999 "ERROR packet dropped because "
1000 "of alloc failure\n");
1001reuse_rx:
1002 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1003 goto next_rx;
1004 }
1005
1006 skb->protocol = eth_type_trans(skb, bp->dev);
1007
1008 skb->ip_summed = CHECKSUM_NONE;
1009 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1010 skb->ip_summed = CHECKSUM_UNNECESSARY;
1011
1012 /* TBD do we pass bad csum packets in promisc */
1013 }
1014
1015#ifdef BCM_VLAN
1016 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1017 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1018 && (bp->vlgrp != NULL))
1019 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1020 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1021 else
1022#endif
1023 netif_receive_skb(skb);
1024
1025 bp->dev->last_rx = jiffies;
1026
1027next_rx:
1028 rx_buf->skb = NULL;
1029
1030 bd_cons = NEXT_RX_IDX(bd_cons);
1031 bd_prod = NEXT_RX_IDX(bd_prod);
1032next_cqe:
1033 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1034 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1035 rx_pkt++;
1036
1037 if ((rx_pkt == budget))
1038 break;
1039 } /* while */
1040
1041 fp->rx_bd_cons = bd_cons;
1042 fp->rx_bd_prod = bd_prod;
1043 fp->rx_comp_cons = sw_comp_cons;
1044 fp->rx_comp_prod = sw_comp_prod;
1045
1046 REG_WR(bp, BAR_TSTRORM_INTMEM +
1047 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1048
1049 mmiowb(); /* keep prod updates ordered */
1050
1051 fp->rx_pkt += rx_pkt;
1052 fp->rx_calls++;
1053
1054 return rx_pkt;
1055}
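/* Receive strategy in the loop above: frames flagged with RX errors and
 * allocation failures recycle the buffer via bnx2x_reuse_rx_skb(); small
 * frames on a large-MTU device are copy-broken into a fresh skb so the big
 * buffer can be reused; otherwise a replacement buffer is allocated and
 * the filled one is passed up the stack.  Only the RCQ producer is written
 * back to TSTORM here - the BD producer bookkeeping stays in
 * fp->rx_bd_prod.
 */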
1056
1057static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1058{
1059 struct bnx2x_fastpath *fp = fp_cookie;
1060 struct bnx2x *bp = fp->bp;
1061 struct net_device *dev = bp->dev;
1062 int index = fp->index;
1063
1064 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1065 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1066
1067#ifdef BNX2X_STOP_ON_ERROR
1068 if (unlikely(bp->panic))
1069 return IRQ_HANDLED;
1070#endif
1071
1072 prefetch(fp->rx_cons_sb);
1073 prefetch(fp->tx_cons_sb);
1074 prefetch(&fp->status_blk->c_status_block.status_block_index);
1075 prefetch(&fp->status_blk->u_status_block.status_block_index);
1076
1077 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1078 return IRQ_HANDLED;
1079}
1080
1081static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1082{
1083 struct net_device *dev = dev_instance;
1084 struct bnx2x *bp = netdev_priv(dev);
1085 u16 status = bnx2x_ack_int(bp);
1086
1087 if (unlikely(status == 0)) {
1088 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1089 return IRQ_NONE;
1090 }
1091
1092 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1093
1094#ifdef BNX2X_STOP_ON_ERROR
1095 if (unlikely(bp->panic))
1096 return IRQ_HANDLED;
1097#endif
1098
1099 /* Return here if interrupt is shared and is disabled */
1100 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1101 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1102 return IRQ_HANDLED;
1103 }
1104
1105 if (status & 0x2) {
1106 struct bnx2x_fastpath *fp = &bp->fp[0];
1107
1108 prefetch(fp->rx_cons_sb);
1109 prefetch(fp->tx_cons_sb);
1110 prefetch(&fp->status_blk->c_status_block.status_block_index);
1111 prefetch(&fp->status_blk->u_status_block.status_block_index);
1112
1113 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1114
1115 status &= ~0x2;
1116 if (!status)
1117 return IRQ_HANDLED;
1118 }
1119
1120 if (unlikely(status & 0x1)) {
1121
1122 schedule_work(&bp->sp_task);
1123
1124 status &= ~0x1;
1125 if (!status)
1126 return IRQ_HANDLED;
1127 }
1128
1129 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1130 status);
1131
1132 return IRQ_HANDLED;
1133}
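/* The value read from the IGU SIMD mask is demultiplexed by bit: 0x2
 * appears to be fastpath queue 0 (handed to NAPI), 0x1 the slowpath /
 * attention events (deferred to sp_task), and anything left over is
 * reported as an unknown interrupt.
 */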
1134
1135/* end of fast path */
1136
1137/* PHY/MAC */
1138
1139/*
1140 * General service functions
1141 */
1142
1143static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1144{
1145 int port = bp->port;
1146
1147 NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1148 ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1149 SHARED_HW_CFG_LED_MODE_SHIFT));
1150 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1151
1152 /* Set blinking rate to ~15.9Hz */
1153 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1154 LED_BLINK_RATE_VAL);
1155 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1156
1157 /* On Ax chip versions for speeds less than 10G
1158 LED scheme is different */
1159 if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1160 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1161 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1162 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1163 }
1164}
1165
1166static void bnx2x_leds_unset(struct bnx2x *bp)
1167{
1168 int port = bp->port;
1169
1170 NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1171 NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
1172}
1173
1174static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1175{
1176 u32 val = REG_RD(bp, reg);
1177
1178 val |= bits;
1179 REG_WR(bp, reg, val);
1180 return val;
1181}
1182
1183static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1184{
1185 u32 val = REG_RD(bp, reg);
1186
1187 val &= ~bits;
1188 REG_WR(bp, reg, val);
1189 return val;
1190}
1191
Eliezer Tamirf1410642008-02-28 11:51:50 -08001192static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1193{
1194 u32 cnt;
1195 u32 lock_status;
1196 u32 resource_bit = (1 << resource);
1197 u8 func = bp->port;
1198
1199 /* Validating that the resource is within range */
1200 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1201 DP(NETIF_MSG_HW,
1202 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1203 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1204 return -EINVAL;
1205 }
1206
1207 /* Validating that the resource is not already taken */
1208 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1209 if (lock_status & resource_bit) {
1210 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1211 lock_status, resource_bit);
1212 return -EEXIST;
1213 }
1214
1215 /* Try for 1 second every 5ms */
1216 for (cnt = 0; cnt < 200; cnt++) {
1217 /* Try to acquire the lock */
1218 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1219 resource_bit);
1220 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1221 if (lock_status & resource_bit)
1222 return 0;
1223
1224 msleep(5);
1225 }
1226 DP(NETIF_MSG_HW, "Timeout\n");
1227 return -EAGAIN;
1228}
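/* The hardware lock is a per-function bit field in
 * MISC_REG_DRIVER_CONTROL_1: the acquire attempt is written to the +4
 * offset and the release in bnx2x_hw_unlock() to the base offset, so those
 * presumably act as set/clear registers, while a read of the base offset
 * shows the currently held bits.  The loop retries every 5ms for up to a
 * second before giving up with -EAGAIN.
 */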
1229
1230static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1231{
1232 u32 lock_status;
1233 u32 resource_bit = (1 << resource);
1234 u8 func = bp->port;
1235
1236 /* Validating that the resource is within range */
1237 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1238 DP(NETIF_MSG_HW,
1239 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1240 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1241 return -EINVAL;
1242 }
1243
1244 /* Validating that the resource is currently taken */
1245 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1246 if (!(lock_status & resource_bit)) {
1247 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1248 lock_status, resource_bit);
1249 return -EFAULT;
1250 }
1251
1252 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1253 return 0;
1254}
1255
1256static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1257{
1258 /* The GPIO should be swapped if swap register is set and active */
1259 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1260 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1261 int gpio_shift = gpio_num +
1262 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1263 u32 gpio_mask = (1 << gpio_shift);
1264 u32 gpio_reg;
1265
1266 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1267 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1268 return -EINVAL;
1269 }
1270
1271 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1272 /* read GPIO and mask except the float bits */
1273 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1274
1275 switch (mode) {
1276 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1277 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1278 gpio_num, gpio_shift);
1279 /* clear FLOAT and set CLR */
1280 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1281 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1282 break;
1283
1284 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1285 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1286 gpio_num, gpio_shift);
1287 /* clear FLOAT and set SET */
1288 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1289 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1290 break;
1291
1292 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1293 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1294 gpio_num, gpio_shift);
1295 /* set FLOAT */
1296 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1297 break;
1298
1299 default:
1300 break;
1301 }
1302
1303 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1304 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1305
1306 return 0;
1307}
1308
1309static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1310{
1311 u32 spio_mask = (1 << spio_num);
1312 u32 spio_reg;
1313
1314 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1315 (spio_num > MISC_REGISTERS_SPIO_7)) {
1316 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1317 return -EINVAL;
1318 }
1319
1320 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1321 /* read SPIO and mask except the float bits */
1322 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1323
1324 switch (mode) {
1325 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1326 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1327 /* clear FLOAT and set CLR */
1328 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1329 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1330 break;
1331
1332 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1333 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1334 /* clear FLOAT and set SET */
1335 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1336 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1337 break;
1338
1339 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1340 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1341 /* set FLOAT */
1342 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1343 break;
1344
1345 default:
1346 break;
1347 }
1348
1349 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1350 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1351
1352 return 0;
1353}
1354
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001355static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1356{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001357 int port = bp->port;
1358 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001359 u32 tmp;
1360 int i, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001361
1362/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1363 bp->phy_addr, reg, val); */
1364
1365 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1366
1367 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1368 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1369 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1370 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1371 udelay(40);
1372 }
1373
1374 tmp = ((bp->phy_addr << 21) | (reg << 16) |
1375 (val & EMAC_MDIO_COMM_DATA) |
1376 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1377 EMAC_MDIO_COMM_START_BUSY);
1378 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1379
1380 for (i = 0; i < 50; i++) {
1381 udelay(10);
1382
1383 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1384 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1385 udelay(5);
1386 break;
1387 }
1388 }
1389
1390 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1391 BNX2X_ERR("write phy register failed\n");
1392
1393 rc = -EBUSY;
1394 } else {
1395 rc = 0;
1396 }
1397
1398 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1399
1400 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1401 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1402 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1403 }
1404
1405 return rc;
1406}
1407
1408static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1409{
1410 int port = bp->port;
1411 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001412 u32 val;
1413 int i, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001414
1415 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1416
1417 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1418 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1419 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1420 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1421 udelay(40);
1422 }
1423
1424 val = ((bp->phy_addr << 21) | (reg << 16) |
1425 EMAC_MDIO_COMM_COMMAND_READ_22 |
1426 EMAC_MDIO_COMM_START_BUSY);
1427 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1428
1429 for (i = 0; i < 50; i++) {
1430 udelay(10);
1431
1432 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1433 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1434 val &= EMAC_MDIO_COMM_DATA;
1435 break;
1436 }
1437 }
1438
1439 if (val & EMAC_MDIO_COMM_START_BUSY) {
1440 BNX2X_ERR("read phy register failed\n");
1441
1442 *ret_val = 0x0;
1443 rc = -EBUSY;
1444 } else {
1445 *ret_val = val;
1446 rc = 0;
1447 }
1448
1449 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1450
1451 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1452 val |= EMAC_MDIO_MODE_AUTO_POLL;
1453 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1454 }
1455
1456/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
1457 bp->phy_addr, reg, *ret_val); */
1458
1459 return rc;
1460}
1461
Eliezer Tamirf1410642008-02-28 11:51:50 -08001462static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1463 u32 phy_addr, u32 reg, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001464{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001465 u32 tmp;
1466 int i, rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001467
Eliezer Tamirf1410642008-02-28 11:51:50 -08001468 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1469 * (a value of 49==0x31) and make sure that the AUTO poll is off
1470 */
1471 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1472 tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1473 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1474 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1475 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1476 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1477 udelay(40);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001478
1479 /* address */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001480 tmp = ((phy_addr << 21) | (reg << 16) | addr |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001481 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1482 EMAC_MDIO_COMM_START_BUSY);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001483 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001484
1485 for (i = 0; i < 50; i++) {
1486 udelay(10);
1487
Eliezer Tamirf1410642008-02-28 11:51:50 -08001488 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001489 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1490 udelay(5);
1491 break;
1492 }
1493 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001494 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1495 BNX2X_ERR("write phy register failed\n");
1496
1497 rc = -EBUSY;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001498
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001499 } else {
1500 /* data */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001501 tmp = ((phy_addr << 21) | (reg << 16) | val |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001502 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1503 EMAC_MDIO_COMM_START_BUSY);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001504 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001505
1506 for (i = 0; i < 50; i++) {
1507 udelay(10);
1508
Eliezer Tamirf1410642008-02-28 11:51:50 -08001509 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001510 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1511 udelay(5);
1512 break;
1513 }
1514 }
1515
1516 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1517 BNX2X_ERR("write phy register failed\n");
1518
1519 rc = -EBUSY;
1520 }
1521 }
1522
Eliezer Tamirf1410642008-02-28 11:51:50 -08001523 /* unset clause 45 mode, set the MDIO clock to a faster value
1524 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1525 */
1526 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1527 tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1528 tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1529 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001530 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001531 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001532
1533 return rc;
1534}
1535
Eliezer Tamirf1410642008-02-28 11:51:50 -08001536static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1537 u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001538{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001539 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001540
Eliezer Tamirf1410642008-02-28 11:51:50 -08001541 return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1542 reg, addr, val);
1543}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001544
Eliezer Tamirf1410642008-02-28 11:51:50 -08001545static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1546 u32 phy_addr, u32 reg, u32 addr,
1547 u32 *ret_val)
1548{
1549 u32 val;
1550 int i, rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001551
Eliezer Tamirf1410642008-02-28 11:51:50 -08001552 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1553 * (a value of 49==0x31) and make sure that the AUTO poll is off
1554 */
1555 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1556 val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1557 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1558 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1559 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1560 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1561 udelay(40);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001562
1563 /* address */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001564 val = ((phy_addr << 21) | (reg << 16) | addr |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001565 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1566 EMAC_MDIO_COMM_START_BUSY);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001567 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001568
1569 for (i = 0; i < 50; i++) {
1570 udelay(10);
1571
Eliezer Tamirf1410642008-02-28 11:51:50 -08001572 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001573 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1574 udelay(5);
1575 break;
1576 }
1577 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001578 if (val & EMAC_MDIO_COMM_START_BUSY) {
1579 BNX2X_ERR("read phy register failed\n");
1580
1581 *ret_val = 0;
1582 rc = -EBUSY;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001583
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001584 } else {
1585 /* data */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001586 val = ((phy_addr << 21) | (reg << 16) |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001587 EMAC_MDIO_COMM_COMMAND_READ_45 |
1588 EMAC_MDIO_COMM_START_BUSY);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001589 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001590
1591 for (i = 0; i < 50; i++) {
1592 udelay(10);
1593
Eliezer Tamirf1410642008-02-28 11:51:50 -08001594 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001595 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1596 val &= EMAC_MDIO_COMM_DATA;
1597 break;
1598 }
1599 }
1600
1601 if (val & EMAC_MDIO_COMM_START_BUSY) {
1602 BNX2X_ERR("read phy register failed\n");
1603
1604 val = 0;
1605 rc = -EBUSY;
1606 }
1607
1608 *ret_val = val;
1609 }
1610
Eliezer Tamirf1410642008-02-28 11:51:50 -08001611 /* unset clause 45 mode, set the MDIO clock to a faster value
1612 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1613 */
1614 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1615 val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1616 val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1617 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001618 val |= EMAC_MDIO_MODE_AUTO_POLL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001619 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001620
1621 return rc;
1622}
1623
Eliezer Tamirf1410642008-02-28 11:51:50 -08001624static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1625 u32 addr, u32 *ret_val)
1626{
1627 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1628
1629 return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1630 reg, addr, ret_val);
1631}
1632
1633static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1634 u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001635{
1636 int i;
1637 u32 rd_val;
1638
1639 might_sleep();
1640 for (i = 0; i < 10; i++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001641 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001642 msleep(5);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001643 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001644 /* if the read value is not the same as the value we wrote,
1645 we should write it again */
1646 if (rd_val == val)
1647 return 0;
1648 }
1649 BNX2X_ERR("MDIO write in CL45 failed\n");
1650 return -EBUSY;
1651}
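/* "vwrite" = verified write: the value is written and then read back, and
 * the pair is retried up to ten times (5ms apart) until the read matches.
 * Presumably some clause 45 PHY registers do not always latch on the
 * first attempt.
 */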
1652
1653/*
Eliezer Tamirc14423f2008-02-28 11:49:42 -08001654 * link management
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001655 */
1656
Eliezer Tamirf1410642008-02-28 11:51:50 -08001657static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1658{
1659 switch (pause_result) { /* ASYM P ASYM P */
1660 case 0xb: /* 1 0 1 1 */
1661 bp->flow_ctrl = FLOW_CTRL_TX;
1662 break;
1663
1664 case 0xe: /* 1 1 1 0 */
1665 bp->flow_ctrl = FLOW_CTRL_RX;
1666 break;
1667
1668 case 0x5: /* 0 1 0 1 */
1669 case 0x7: /* 0 1 1 1 */
1670 case 0xd: /* 1 1 0 1 */
1671 case 0xf: /* 1 1 1 1 */
1672 bp->flow_ctrl = FLOW_CTRL_BOTH;
1673 break;
1674
1675 default:
1676 break;
1677 }
1678}
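/* Standard 802.3 pause resolution: the nibble is (local ASM_DIR, local
 * PAUSE, partner ASM_DIR, partner PAUSE) as the column headers above show,
 * and the switch implements the usual truth table - 0xb resolves to TX
 * only, 0xe to RX only, and any combination where both sides advertise
 * symmetric PAUSE resolves to flow control in both directions.
 */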
1679
1680static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
1681{
1682 u32 ext_phy_addr;
1683 u32 ld_pause; /* local */
1684 u32 lp_pause; /* link partner */
1685 u32 an_complete; /* AN complete */
1686 u32 pause_result;
1687 u8 ret = 0;
1688
1689 ext_phy_addr = ((bp->ext_phy_config &
1690 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1691 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1692
1693 /* read twice */
1694 bnx2x_mdio45_read(bp, ext_phy_addr,
1695 EXT_PHY_KR_AUTO_NEG_DEVAD,
1696 EXT_PHY_KR_STATUS, &an_complete);
1697 bnx2x_mdio45_read(bp, ext_phy_addr,
1698 EXT_PHY_KR_AUTO_NEG_DEVAD,
1699 EXT_PHY_KR_STATUS, &an_complete);
1700
1701 if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
1702 ret = 1;
1703 bnx2x_mdio45_read(bp, ext_phy_addr,
1704 EXT_PHY_KR_AUTO_NEG_DEVAD,
1705 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
1706 bnx2x_mdio45_read(bp, ext_phy_addr,
1707 EXT_PHY_KR_AUTO_NEG_DEVAD,
1708 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
1709 pause_result = (ld_pause &
1710 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
1711 pause_result |= (lp_pause &
1712 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
1713 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1714 pause_result);
1715 bnx2x_pause_resolve(bp, pause_result);
1716 }
1717 return ret;
1718}
1719
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001720static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1721{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001722 u32 ld_pause; /* local driver */
1723 u32 lp_pause; /* link partner */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001724 u32 pause_result;
1725
1726 bp->flow_ctrl = 0;
1727
Eliezer Tamirc14423f2008-02-28 11:49:42 -08001728 /* resolve from gp_status in case of AN complete and not sgmii */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001729 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1730 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1731 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1732 (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1733
1734 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1735 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1736 &ld_pause);
1737 bnx2x_mdio22_read(bp,
1738 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1739 &lp_pause);
1740 pause_result = (ld_pause &
1741 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1742 pause_result |= (lp_pause &
1743 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1744 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001745 bnx2x_pause_resolve(bp, pause_result);
1746 } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
1747 !(bnx2x_ext_phy_resove_fc(bp))) {
1748 /* forced speed */
1749 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
1750 switch (bp->req_flow_ctrl) {
1751 case FLOW_CTRL_AUTO:
1752 if (bp->dev->mtu <= 4500)
1753 bp->flow_ctrl = FLOW_CTRL_BOTH;
1754 else
1755 bp->flow_ctrl = FLOW_CTRL_TX;
1756 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001757
Eliezer Tamirf1410642008-02-28 11:51:50 -08001758 case FLOW_CTRL_TX:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001759 bp->flow_ctrl = FLOW_CTRL_TX;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001760 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001761
Eliezer Tamirf1410642008-02-28 11:51:50 -08001762 case FLOW_CTRL_RX:
1763 if (bp->dev->mtu <= 4500)
1764 bp->flow_ctrl = FLOW_CTRL_RX;
1765 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001766
Eliezer Tamirf1410642008-02-28 11:51:50 -08001767 case FLOW_CTRL_BOTH:
1768 if (bp->dev->mtu <= 4500)
1769 bp->flow_ctrl = FLOW_CTRL_BOTH;
1770 else
1771 bp->flow_ctrl = FLOW_CTRL_TX;
1772 break;
1773
1774 case FLOW_CTRL_NONE:
1775 default:
1776 break;
1777 }
1778 } else { /* forced mode */
1779 switch (bp->req_flow_ctrl) {
1780 case FLOW_CTRL_AUTO:
1781 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
1782 " req_autoneg 0x%x\n",
1783 bp->req_flow_ctrl, bp->req_autoneg);
1784 break;
1785
1786 case FLOW_CTRL_TX:
1787 case FLOW_CTRL_RX:
1788 case FLOW_CTRL_BOTH:
1789 bp->flow_ctrl = bp->req_flow_ctrl;
1790 break;
1791
1792 case FLOW_CTRL_NONE:
1793 default:
1794 break;
1795 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001796 }
1797 }
1798 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
1799}
1800
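/* Decode gp_status into the driver link state: phy_link_up, duplex,
 * line_speed and the link_status word (later written to shared memory).
 */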
1801static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1802{
1803 bp->link_status = 0;
1804
1805 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001806 DP(NETIF_MSG_LINK, "phy link up\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001807
Eliezer Tamirf1410642008-02-28 11:51:50 -08001808 bp->phy_link_up = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001809 bp->link_status |= LINK_STATUS_LINK_UP;
1810
1811 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1812 bp->duplex = DUPLEX_FULL;
1813 else
1814 bp->duplex = DUPLEX_HALF;
1815
1816 bnx2x_flow_ctrl_resolve(bp, gp_status);
1817
1818 switch (gp_status & GP_STATUS_SPEED_MASK) {
1819 case GP_STATUS_10M:
1820 bp->line_speed = SPEED_10;
1821 if (bp->duplex == DUPLEX_FULL)
1822 bp->link_status |= LINK_10TFD;
1823 else
1824 bp->link_status |= LINK_10THD;
1825 break;
1826
1827 case GP_STATUS_100M:
1828 bp->line_speed = SPEED_100;
1829 if (bp->duplex == DUPLEX_FULL)
1830 bp->link_status |= LINK_100TXFD;
1831 else
1832 bp->link_status |= LINK_100TXHD;
1833 break;
1834
1835 case GP_STATUS_1G:
1836 case GP_STATUS_1G_KX:
1837 bp->line_speed = SPEED_1000;
1838 if (bp->duplex == DUPLEX_FULL)
1839 bp->link_status |= LINK_1000TFD;
1840 else
1841 bp->link_status |= LINK_1000THD;
1842 break;
1843
1844 case GP_STATUS_2_5G:
1845 bp->line_speed = SPEED_2500;
1846 if (bp->duplex == DUPLEX_FULL)
1847 bp->link_status |= LINK_2500TFD;
1848 else
1849 bp->link_status |= LINK_2500THD;
1850 break;
1851
1852 case GP_STATUS_5G:
1853 case GP_STATUS_6G:
1854 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1855 gp_status);
1856 break;
1857
1858 case GP_STATUS_10G_KX4:
1859 case GP_STATUS_10G_HIG:
1860 case GP_STATUS_10G_CX4:
1861 bp->line_speed = SPEED_10000;
1862 bp->link_status |= LINK_10GTFD;
1863 break;
1864
1865 case GP_STATUS_12G_HIG:
1866 bp->line_speed = SPEED_12000;
1867 bp->link_status |= LINK_12GTFD;
1868 break;
1869
1870 case GP_STATUS_12_5G:
1871 bp->line_speed = SPEED_12500;
1872 bp->link_status |= LINK_12_5GTFD;
1873 break;
1874
1875 case GP_STATUS_13G:
1876 bp->line_speed = SPEED_13000;
1877 bp->link_status |= LINK_13GTFD;
1878 break;
1879
1880 case GP_STATUS_15G:
1881 bp->line_speed = SPEED_15000;
1882 bp->link_status |= LINK_15GTFD;
1883 break;
1884
1885 case GP_STATUS_16G:
1886 bp->line_speed = SPEED_16000;
1887 bp->link_status |= LINK_16GTFD;
1888 break;
1889
1890 default:
1891 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1892 gp_status);
1893 break;
1894 }
1895
1896 bp->link_status |= LINK_STATUS_SERDES_LINK;
1897
1898 if (bp->req_autoneg & AUTONEG_SPEED) {
1899 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1900
1901 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1902 bp->link_status |=
1903 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1904
1905 if (bp->autoneg & AUTONEG_PARALLEL)
1906 bp->link_status |=
1907 LINK_STATUS_PARALLEL_DETECTION_USED;
1908 }
1909
1910 if (bp->flow_ctrl & FLOW_CTRL_TX)
1911 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1912
1913 if (bp->flow_ctrl & FLOW_CTRL_RX)
1914 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1915
1916 } else { /* link_down */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001917 DP(NETIF_MSG_LINK, "phy link down\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001918
Eliezer Tamirf1410642008-02-28 11:51:50 -08001919 bp->phy_link_up = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001920
1921 bp->line_speed = 0;
1922 bp->duplex = DUPLEX_FULL;
1923 bp->flow_ctrl = 0;
1924 }
1925
Eliezer Tamirf1410642008-02-28 11:51:50 -08001926 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001927 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1928 " link_status 0x%x\n",
Eliezer Tamirf1410642008-02-28 11:51:50 -08001929 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1930 bp->flow_ctrl, bp->link_status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001931}
1932
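/* Acknowledge the NIG link interrupt: first clear all link status bits,
 * then set the one matching the link that came up (10G, 1G XGXS lane or
 * SerDes) so it does not keep firing.
 */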
1933static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1934{
1935 int port = bp->port;
1936
1937 /* first reset all status
Eliezer Tamirc14423f2008-02-28 11:49:42 -08001938	 * we assume only one line will be changed at a time */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001939 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
Eliezer Tamirf1410642008-02-28 11:51:50 -08001940 (NIG_STATUS_XGXS0_LINK10G |
1941 NIG_STATUS_XGXS0_LINK_STATUS |
1942 NIG_STATUS_SERDES0_LINK_STATUS));
1943 if (bp->phy_link_up) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001944 if (is_10g) {
1945 /* Disable the 10G link interrupt
1946 * by writing 1 to the status register
1947 */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001948 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001949 bnx2x_bits_en(bp,
1950 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
Eliezer Tamirf1410642008-02-28 11:51:50 -08001951 NIG_STATUS_XGXS0_LINK10G);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001952
1953 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1954 /* Disable the link interrupt
1955 * by writing 1 to the relevant lane
1956 * in the status register
1957 */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001958 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001959 bnx2x_bits_en(bp,
1960 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1961 ((1 << bp->ser_lane) <<
Eliezer Tamirf1410642008-02-28 11:51:50 -08001962 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001963
1964 } else { /* SerDes */
Eliezer Tamirf1410642008-02-28 11:51:50 -08001965 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001966 /* Disable the link interrupt
1967 * by writing 1 to the status register
1968 */
1969 bnx2x_bits_en(bp,
1970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
Eliezer Tamirf1410642008-02-28 11:51:50 -08001971 NIG_STATUS_SERDES0_LINK_STATUS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001972 }
1973
1974 } else { /* link_down */
1975 }
1976}
1977
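/* Poll the external PHY (if any) over clause 45 MDIO and return 1 if it
 * reports link up. Direct XGXS/SerDes connections always return 1 here
 * since there is no external PHY to query.
 */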
1978static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1979{
1980 u32 ext_phy_type;
1981 u32 ext_phy_addr;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001982 u32 val1 = 0, val2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001983 u32 rx_sd, pcs_status;
1984
1985 if (bp->phy_flags & PHY_XGXS_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001986 ext_phy_addr = ((bp->ext_phy_config &
1987 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1988 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001989
1990 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1991 switch (ext_phy_type) {
1992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1993 DP(NETIF_MSG_LINK, "XGXS Direct\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08001994 val1 = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001995 break;
1996
1997 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1998 DP(NETIF_MSG_LINK, "XGXS 8705\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08001999 bnx2x_mdio45_read(bp, ext_phy_addr,
2000 EXT_PHY_OPT_WIS_DEVAD,
2001 EXT_PHY_OPT_LASI_STATUS, &val1);
2002 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002003
Eliezer Tamirf1410642008-02-28 11:51:50 -08002004 bnx2x_mdio45_read(bp, ext_phy_addr,
2005 EXT_PHY_OPT_WIS_DEVAD,
2006 EXT_PHY_OPT_LASI_STATUS, &val1);
2007 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002008
Eliezer Tamirf1410642008-02-28 11:51:50 -08002009 bnx2x_mdio45_read(bp, ext_phy_addr,
2010 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002011 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002012 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2013 val1 = (rx_sd & 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002014 break;
2015
2016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2017 DP(NETIF_MSG_LINK, "XGXS 8706\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08002018 bnx2x_mdio45_read(bp, ext_phy_addr,
2019 EXT_PHY_OPT_PMA_PMD_DEVAD,
2020 EXT_PHY_OPT_LASI_STATUS, &val1);
2021 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002022
Eliezer Tamirf1410642008-02-28 11:51:50 -08002023 bnx2x_mdio45_read(bp, ext_phy_addr,
2024 EXT_PHY_OPT_PMA_PMD_DEVAD,
2025 EXT_PHY_OPT_LASI_STATUS, &val1);
2026 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002027
Eliezer Tamirf1410642008-02-28 11:51:50 -08002028 bnx2x_mdio45_read(bp, ext_phy_addr,
2029 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002030 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002031 bnx2x_mdio45_read(bp, ext_phy_addr,
2032 EXT_PHY_OPT_PCS_DEVAD,
2033 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2034 bnx2x_mdio45_read(bp, ext_phy_addr,
2035 EXT_PHY_AUTO_NEG_DEVAD,
2036 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2037
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002038 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08002039 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2040 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2041 /* link is up if both bit 0 of pmd_rx_sd and
2042 * bit 0 of pcs_status are set, or if the autoneg bit
2043			 * 1 is set
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002044 */
Eliezer Tamirf1410642008-02-28 11:51:50 -08002045 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2046 break;
2047
2048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2049 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2050
2051 /* clear the interrupt LASI status register */
2052 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2053 ext_phy_addr,
2054 EXT_PHY_KR_PCS_DEVAD,
2055 EXT_PHY_KR_LASI_STATUS, &val2);
2056 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2057 ext_phy_addr,
2058 EXT_PHY_KR_PCS_DEVAD,
2059 EXT_PHY_KR_LASI_STATUS, &val1);
2060 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2061 val2, val1);
2062 /* Check the LASI */
2063 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2064 ext_phy_addr,
2065 EXT_PHY_KR_PMA_PMD_DEVAD,
2066 0x9003, &val2);
2067 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2068 ext_phy_addr,
2069 EXT_PHY_KR_PMA_PMD_DEVAD,
2070 0x9003, &val1);
2071 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2072 val2, val1);
2073 /* Check the link status */
2074 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2075 ext_phy_addr,
2076 EXT_PHY_KR_PCS_DEVAD,
2077 EXT_PHY_KR_PCS_STATUS, &val2);
2078 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2079 /* Check the link status on 1.1.2 */
2080 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2081 ext_phy_addr,
2082 EXT_PHY_OPT_PMA_PMD_DEVAD,
2083 EXT_PHY_KR_STATUS, &val2);
2084 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2085 ext_phy_addr,
2086 EXT_PHY_OPT_PMA_PMD_DEVAD,
2087 EXT_PHY_KR_STATUS, &val1);
2088 DP(NETIF_MSG_LINK,
2089 "KR PMA status 0x%x->0x%x\n", val2, val1);
2090 val1 = ((val1 & 4) == 4);
2091 /* If 1G was requested assume the link is up */
2092 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2093 (bp->req_line_speed == SPEED_1000))
2094 val1 = 1;
2095 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2096 break;
2097
2098 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2099 bnx2x_mdio45_read(bp, ext_phy_addr,
2100 EXT_PHY_OPT_PMA_PMD_DEVAD,
2101 EXT_PHY_OPT_LASI_STATUS, &val2);
2102 bnx2x_mdio45_read(bp, ext_phy_addr,
2103 EXT_PHY_OPT_PMA_PMD_DEVAD,
2104 EXT_PHY_OPT_LASI_STATUS, &val1);
2105 DP(NETIF_MSG_LINK,
2106 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2107 bnx2x_mdio45_read(bp, ext_phy_addr,
2108 EXT_PHY_OPT_PMA_PMD_DEVAD,
2109 EXT_PHY_KR_STATUS, &val2);
2110 bnx2x_mdio45_read(bp, ext_phy_addr,
2111 EXT_PHY_OPT_PMA_PMD_DEVAD,
2112 EXT_PHY_KR_STATUS, &val1);
2113 DP(NETIF_MSG_LINK,
2114 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2115 val1 = ((val1 & 4) == 4);
2116 /* if link is up
2117 * print the AN outcome of the SFX7101 PHY
2118 */
2119 if (val1) {
2120 bnx2x_mdio45_read(bp, ext_phy_addr,
2121 EXT_PHY_KR_AUTO_NEG_DEVAD,
2122 0x21, &val2);
2123 DP(NETIF_MSG_LINK,
2124 "SFX7101 AN status 0x%x->%s\n", val2,
2125 (val2 & (1<<14)) ? "Master" : "Slave");
2126 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002127 break;
2128
2129 default:
2130 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2131 bp->ext_phy_config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002132 val1 = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002133 break;
2134 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002135
2136 } else { /* SerDes */
2137 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2138 switch (ext_phy_type) {
2139 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2140 DP(NETIF_MSG_LINK, "SerDes Direct\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08002141 val1 = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002142 break;
2143
2144 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2145 DP(NETIF_MSG_LINK, "SerDes 5482\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08002146 val1 = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002147 break;
2148
2149 default:
2150 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2151 bp->ext_phy_config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002152 val1 = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002153 break;
2154 }
2155 }
2156
Eliezer Tamirf1410642008-02-28 11:51:50 -08002157 return val1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002158}
2159
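/* Bring the BigMAC out of reset and configure it for the current link:
 * source MAC address, jumbo MTU, TX/RX flow control and the NIG routing
 * (BMAC in/out enabled, EMAC disabled). is_lb enables MAC loopback.
 */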
2160static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2161{
2162 int port = bp->port;
2163 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2164 NIG_REG_INGRESS_BMAC0_MEM;
2165 u32 wb_write[2];
2166 u32 val;
2167
Eliezer Tamirc14423f2008-02-28 11:49:42 -08002168 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002169 /* reset and unreset the BigMac */
2170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2171 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2172 msleep(5);
2173 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2174 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2175
2176 /* enable access for bmac registers */
2177 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2178
2179 /* XGXS control */
2180 wb_write[0] = 0x3c;
2181 wb_write[1] = 0;
2182 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2183 wb_write, 2);
2184
2185 /* tx MAC SA */
2186 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2187 (bp->dev->dev_addr[3] << 16) |
2188 (bp->dev->dev_addr[4] << 8) |
2189 bp->dev->dev_addr[5]);
2190 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2191 bp->dev->dev_addr[1]);
2192 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2193 wb_write, 2);
2194
2195 /* tx control */
2196 val = 0xc0;
2197 if (bp->flow_ctrl & FLOW_CTRL_TX)
2198 val |= 0x800000;
2199 wb_write[0] = val;
2200 wb_write[1] = 0;
2201 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2202
2203 /* set tx mtu */
2204 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2205 wb_write[1] = 0;
2206 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2207
2208 /* mac control */
2209 val = 0x3;
2210 if (is_lb) {
2211 val |= 0x4;
2212 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2213 }
2214 wb_write[0] = val;
2215 wb_write[1] = 0;
2216 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2217 wb_write, 2);
2218
2219 /* rx control set to don't strip crc */
2220 val = 0x14;
2221 if (bp->flow_ctrl & FLOW_CTRL_RX)
2222 val |= 0x20;
2223 wb_write[0] = val;
2224 wb_write[1] = 0;
2225 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2226
2227 /* set rx mtu */
2228 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2229 wb_write[1] = 0;
2230 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2231
2232 /* set cnt max size */
2233 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2234 wb_write[1] = 0;
2235 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2236 wb_write, 2);
2237
2238 /* configure safc */
2239 wb_write[0] = 0x1000200;
2240 wb_write[1] = 0;
2241 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2242 wb_write, 2);
2243
2244 /* fix for emulation */
2245 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2246 wb_write[0] = 0xf000;
2247 wb_write[1] = 0;
2248 REG_WR_DMAE(bp,
2249 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2250 wb_write, 2);
2251 }
2252
2253 /* reset old bmac stats */
2254 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2255
2256 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2257
2258 /* select XGXS */
2259 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2260 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2261
2262 /* disable the NIG in/out to the emac */
2263 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2264 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2265 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2266
2267 /* enable the NIG in/out to the bmac */
2268 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2269
2270 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2271 val = 0;
2272 if (bp->flow_ctrl & FLOW_CTRL_TX)
2273 val = 1;
2274 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2275 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2276
2277 bp->phy_flags |= PHY_BMAC_FLAG;
2278
2279 bp->stats_state = STATS_STATE_ENABLE;
2280}
2281
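/* Stop BigMAC reception by clearing the RX enable bit in BMAC_CONTROL
 * (only if the BigMAC is out of reset).
 */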
Eliezer Tamirf1410642008-02-28 11:51:50 -08002282static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2283{
2284 int port = bp->port;
2285 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2286 NIG_REG_INGRESS_BMAC0_MEM;
2287 u32 wb_write[2];
2288
2289 /* Only if the bmac is out of reset */
2290 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2291 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2292 /* Clear Rx Enable bit in BMAC_CONTROL register */
2293#ifdef BNX2X_DMAE_RD
2294 bnx2x_read_dmae(bp, bmac_addr +
2295 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2296 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2297 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2298#else
2299 wb_write[0] = REG_RD(bp,
2300 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2301 wb_write[1] = REG_RD(bp,
2302 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2303#endif
2304 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2305 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2306 wb_write, 2);
2307 msleep(1);
2308 }
2309}
2310
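/* Bring the EMAC core out of reset, select the XGXS/SerDes source in the
 * NIG, program flow control, MAC address and jumbo MTU, and switch the
 * NIG data path from the BigMAC to the EMAC.
 */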
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002311static void bnx2x_emac_enable(struct bnx2x *bp)
2312{
2313 int port = bp->port;
2314 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2315 u32 val;
2316 int timeout;
2317
Eliezer Tamirc14423f2008-02-28 11:49:42 -08002318 DP(NETIF_MSG_LINK, "enabling EMAC\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002319 /* reset and unreset the emac core */
2320 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2321 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2322 msleep(5);
2323 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2324 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2325
2326 /* enable emac and not bmac */
2327 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2328
2329	/* for Palladium (emulation) */
2330 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2331 /* Use lane 1 (of lanes 0-3) */
2332 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2333 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2334 }
2335 /* for fpga */
2336 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2337 /* Use lane 1 (of lanes 0-3) */
2338 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2339 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2340 }
2341 /* ASIC */
2342 else {
2343 if (bp->phy_flags & PHY_XGXS_FLAG) {
2344 DP(NETIF_MSG_LINK, "XGXS\n");
2345 /* select the master lanes (out of 0-3) */
2346 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2347 bp->ser_lane);
2348 /* select XGXS */
2349 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2350
2351 } else { /* SerDes */
2352 DP(NETIF_MSG_LINK, "SerDes\n");
2353 /* select SerDes */
2354 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2355 }
2356 }
2357
2358 /* enable emac */
2359 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2360
2361 /* init emac - use read-modify-write */
2362 /* self clear reset */
2363 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2364 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2365
2366 timeout = 200;
2367 while (val & EMAC_MODE_RESET) {
2368 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2369 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2370 if (!timeout) {
2371 BNX2X_ERR("EMAC timeout!\n");
2372 break;
2373 }
2374 timeout--;
2375 }
2376
2377 /* reset tx part */
2378 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2379
2380 timeout = 200;
2381 while (val & EMAC_TX_MODE_RESET) {
2382 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2383 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2384 if (!timeout) {
2385 BNX2X_ERR("EMAC timeout!\n");
2386 break;
2387 }
2388 timeout--;
2389 }
2390
2391 if (CHIP_REV_IS_SLOW(bp)) {
2392 /* config GMII mode */
2393 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2394 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2395
2396 } else { /* ASIC */
2397 /* pause enable/disable */
2398 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2399 EMAC_RX_MODE_FLOW_EN);
2400 if (bp->flow_ctrl & FLOW_CTRL_RX)
2401 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2402 EMAC_RX_MODE_FLOW_EN);
2403
2404 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2405 EMAC_TX_MODE_EXT_PAUSE_EN);
2406 if (bp->flow_ctrl & FLOW_CTRL_TX)
2407 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2408 EMAC_TX_MODE_EXT_PAUSE_EN);
2409 }
2410
Eliezer Tamirc14423f2008-02-28 11:49:42 -08002411 /* KEEP_VLAN_TAG, promiscuous */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002412 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2413 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2414 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2415
2416 /* identify magic packets */
2417 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2418 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2419
2420 /* enable emac for jumbo packets */
2421 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2422 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2423 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2424
2425 /* strip CRC */
2426 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2427
2428 val = ((bp->dev->dev_addr[0] << 8) |
2429 bp->dev->dev_addr[1]);
2430 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2431
2432 val = ((bp->dev->dev_addr[2] << 24) |
2433 (bp->dev->dev_addr[3] << 16) |
2434 (bp->dev->dev_addr[4] << 8) |
2435 bp->dev->dev_addr[5]);
2436 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2437
2438 /* disable the NIG in/out to the bmac */
2439 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2440 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2441 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2442
2443 /* enable the NIG in/out to the emac */
2444 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2445 val = 0;
2446 if (bp->flow_ctrl & FLOW_CTRL_TX)
2447 val = 1;
2448 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2449 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2450
2451 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2452 /* take the BigMac out of reset */
2453 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2454 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2455
2456 /* enable access for bmac registers */
2457 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2458 }
2459
2460 bp->phy_flags |= PHY_EMAC_FLAG;
2461
2462 bp->stats_state = STATS_STATE_ENABLE;
2463}
2464
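/* Program the EMAC mode register with the resolved speed and duplex
 * (10M/100M/1G/2.5G; 10G is not valid for the EMAC) and update the LEDs
 * accordingly.
 */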
2465static void bnx2x_emac_program(struct bnx2x *bp)
2466{
2467 u16 mode = 0;
2468 int port = bp->port;
2469
2470 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2471 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2472 (EMAC_MODE_25G_MODE |
2473 EMAC_MODE_PORT_MII_10M |
2474 EMAC_MODE_HALF_DUPLEX));
2475 switch (bp->line_speed) {
2476 case SPEED_10:
2477 mode |= EMAC_MODE_PORT_MII_10M;
2478 break;
2479
2480 case SPEED_100:
2481 mode |= EMAC_MODE_PORT_MII;
2482 break;
2483
2484 case SPEED_1000:
2485 mode |= EMAC_MODE_PORT_GMII;
2486 break;
2487
2488 case SPEED_2500:
2489 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2490 break;
2491
2492 default:
2493 /* 10G not valid for EMAC */
2494 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2495 break;
2496 }
2497
2498 if (bp->duplex == DUPLEX_HALF)
2499 mode |= EMAC_MODE_HALF_DUPLEX;
2500 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2501 mode);
2502
2503 bnx2x_leds_set(bp, bp->line_speed);
2504}
2505
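/* Copy the link partner preemphasis value (LP_UP2 bits [10:7]) into the
 * TX driver preemphasis field (bits [15:12]) if it is non-zero and
 * differs from the current setting.
 */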
2506static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2507{
2508 u32 lp_up2;
2509 u32 tx_driver;
2510
2511 /* read precomp */
2512 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2513 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2514
2515 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2516 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2517
2518 /* bits [10:7] at lp_up2, positioned at [15:12] */
2519 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2520 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2521 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2522
2523 if ((lp_up2 != 0) &&
2524 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2525 /* replace tx_driver bits [15:12] */
2526 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2527 tx_driver |= lp_up2;
2528 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2529 }
2530}
2531
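/* Reconfigure the PBF for the new link: wait for the credits to return
 * to their init value, then program the pause enable bit, arbitration
 * threshold and init credit for the resolved line speed.
 */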
2532static void bnx2x_pbf_update(struct bnx2x *bp)
2533{
2534 int port = bp->port;
2535 u32 init_crd, crd;
2536 u32 count = 1000;
2537 u32 pause = 0;
2538
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002539 /* disable port */
2540 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2541
2542 /* wait for init credit */
2543 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2544 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2545 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2546
2547 while ((init_crd != crd) && count) {
2548 msleep(5);
2549
2550 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2551 count--;
2552 }
2553 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2554 if (init_crd != crd)
2555 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2556
2557 if (bp->flow_ctrl & FLOW_CTRL_RX)
2558 pause = 1;
2559 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2560 if (pause) {
2561 /* update threshold */
2562 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2563 /* update init credit */
2564 init_crd = 778; /* (800-18-4) */
2565
2566 } else {
2567 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2568
2569 /* update threshold */
2570 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2571 /* update init credit */
2572 switch (bp->line_speed) {
2573 case SPEED_10:
2574 case SPEED_100:
2575 case SPEED_1000:
2576 init_crd = thresh + 55 - 22;
2577 break;
2578
2579 case SPEED_2500:
2580 init_crd = thresh + 138 - 22;
2581 break;
2582
2583 case SPEED_10000:
2584 init_crd = thresh + 553 - 22;
2585 break;
2586
2587 default:
2588 BNX2X_ERR("Invalid line_speed 0x%x\n",
2589 bp->line_speed);
2590 break;
2591 }
2592 }
2593 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2594 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2595 bp->line_speed, init_crd);
2596
2597 /* probe the credit changes */
2598 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2599 msleep(5);
2600 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2601
2602 /* enable port */
2603 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2604}
2605
2606static void bnx2x_update_mng(struct bnx2x *bp)
2607{
2608 if (!nomcp)
Eliezer Tamirf1410642008-02-28 11:51:50 -08002609 SHMEM_WR(bp, port_mb[bp->port].link_status,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002610 bp->link_status);
2611}
2612
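/* Update the carrier state and print the link parameters (speed, duplex
 * and flow control) to the system log.
 */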
2613static void bnx2x_link_report(struct bnx2x *bp)
2614{
2615 if (bp->link_up) {
2616 netif_carrier_on(bp->dev);
2617 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2618
2619 printk("%d Mbps ", bp->line_speed);
2620
2621 if (bp->duplex == DUPLEX_FULL)
2622 printk("full duplex");
2623 else
2624 printk("half duplex");
2625
2626 if (bp->flow_ctrl) {
2627 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2628 printk(", receive ");
2629 if (bp->flow_ctrl & FLOW_CTRL_TX)
2630 printk("& transmit ");
2631 } else {
2632 printk(", transmit ");
2633 }
2634 printk("flow control ON");
2635 }
2636 printk("\n");
2637
2638 } else { /* link_down */
2639 netif_carrier_off(bp->dev);
2640 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2641 }
2642}
2643
2644static void bnx2x_link_up(struct bnx2x *bp)
2645{
2646 int port = bp->port;
2647
2648 /* PBF - link up */
2649 bnx2x_pbf_update(bp);
2650
2651 /* disable drain */
2652 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2653
2654 /* update shared memory */
2655 bnx2x_update_mng(bp);
2656
2657 /* indicate link up */
2658 bnx2x_link_report(bp);
2659}
2660
2661static void bnx2x_link_down(struct bnx2x *bp)
2662{
2663 int port = bp->port;
2664
2665 /* notify stats */
2666 if (bp->stats_state != STATS_STATE_DISABLE) {
2667 bp->stats_state = STATS_STATE_STOP;
2668 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2669 }
2670
Eliezer Tamirf1410642008-02-28 11:51:50 -08002671 /* indicate no mac active */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002672 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2673
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002674 /* update shared memory */
2675 bnx2x_update_mng(bp);
2676
Eliezer Tamirf1410642008-02-28 11:51:50 -08002677 /* activate nig drain */
2678 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2679
2680 /* reset BigMac */
2681 bnx2x_bmac_rx_disable(bp);
2682 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2683 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2684
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002685 /* indicate link down */
2686 bnx2x_link_report(bp);
2687}
2688
2689static void bnx2x_init_mac_stats(struct bnx2x *bp);
2690
2691/* This function is called upon link interrupt */
2692static void bnx2x_link_update(struct bnx2x *bp)
2693{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002694 int port = bp->port;
2695 int i;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002696 u32 gp_status;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002697 int link_10g;
2698
Eliezer Tamirf1410642008-02-28 11:51:50 -08002699 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002700 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
Eliezer Tamirf1410642008-02-28 11:51:50 -08002701 " 10G %x, XGXS_LINK %x\n", port,
2702 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002703 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2704 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2705 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2706 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2707 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2708 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2709 );
2710
2711 might_sleep();
2712 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2713 /* avoid fast toggling */
Eliezer Tamirf1410642008-02-28 11:51:50 -08002714 for (i = 0; i < 10; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002715 msleep(10);
2716 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2717 &gp_status);
2718 }
2719
2720 bnx2x_link_settings_status(bp, gp_status);
2721
2722 /* anything 10 and over uses the bmac */
2723 link_10g = ((bp->line_speed >= SPEED_10000) &&
2724 (bp->line_speed <= SPEED_16000));
2725
2726 bnx2x_link_int_ack(bp, link_10g);
2727
2728 /* link is up only if both local phy and external phy are up */
Eliezer Tamirf1410642008-02-28 11:51:50 -08002729 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2730 if (bp->link_up) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 if (link_10g) {
2732 bnx2x_bmac_enable(bp, 0);
2733 bnx2x_leds_set(bp, SPEED_10000);
2734
2735 } else {
2736 bnx2x_emac_enable(bp);
2737 bnx2x_emac_program(bp);
2738
2739 /* AN complete? */
2740 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2741 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2742 bnx2x_set_sgmii_tx_driver(bp);
2743 }
2744 }
2745 bnx2x_link_up(bp);
2746
2747 } else { /* link down */
2748 bnx2x_leds_unset(bp);
2749 bnx2x_link_down(bp);
2750 }
2751
2752 bnx2x_init_mac_stats(bp);
2753}
2754
2755/*
2756 * Init service functions
2757 */
2758
2759static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2760{
2761 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2762 (bp->phy_addr + bp->ser_lane) : 0;
2763
2764 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2765 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2766}
2767
2768static void bnx2x_set_master_ln(struct bnx2x *bp)
2769{
2770 u32 new_master_ln;
2771
2772 /* set the master_ln for AN */
2773 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2774 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2775 &new_master_ln);
2776 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2777 (new_master_ln | bp->ser_lane));
2778}
2779
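/* Reset the XGXS/SerDes unicore via the combo IEEE0 MII control register
 * and wait for the reset bit to self clear.
 */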
2780static void bnx2x_reset_unicore(struct bnx2x *bp)
2781{
2782 u32 mii_control;
2783 int i;
2784
2785 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2786 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2787 /* reset the unicore */
2788 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2789 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2790
2791 /* wait for the reset to self clear */
2792 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2793 udelay(5);
2794
2795 /* the reset erased the previous bank value */
2796 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2797 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2798 &mii_control);
2799
2800 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2801 udelay(5);
2802 return;
2803 }
2804 }
2805
Eliezer Tamirf1410642008-02-28 11:51:50 -08002806 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2807 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2808 bp->phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002809}
2810
2811static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2812{
2813	/* Each two bits represent a lane number:
2814	   no swap is 0123 => 0x1b, so there is no need to enable the swap */
2815
2816 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2817 if (bp->rx_lane_swap != 0x1b) {
2818 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2819 (bp->rx_lane_swap |
2820 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2821 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2822 } else {
2823 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2824 }
2825
2826 if (bp->tx_lane_swap != 0x1b) {
2827 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2828 (bp->tx_lane_swap |
2829 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2830 } else {
2831 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2832 }
2833}
2834
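/* Enable or disable parallel detection (for 1G, and for 10G on XGXS)
 * according to the AUTONEG_PARALLEL flag.
 */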
2835static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2836{
2837 u32 control2;
2838
2839 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2840 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2841 &control2);
2842
2843 if (bp->autoneg & AUTONEG_PARALLEL) {
2844 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2845 } else {
2846 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2847 }
2848 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2849 control2);
2850
2851 if (bp->phy_flags & PHY_XGXS_FLAG) {
2852 DP(NETIF_MSG_LINK, "XGXS\n");
2853 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2854
2855 bnx2x_mdio22_write(bp,
Eliezer Tamirf1410642008-02-28 11:51:50 -08002856 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2858
2859 bnx2x_mdio22_read(bp,
Eliezer Tamirf1410642008-02-28 11:51:50 -08002860 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2861 &control2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002862
2863 if (bp->autoneg & AUTONEG_PARALLEL) {
2864 control2 |=
2865 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2866 } else {
2867 control2 &=
2868 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2869 }
2870 bnx2x_mdio22_write(bp,
Eliezer Tamirf1410642008-02-28 11:51:50 -08002871 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2872 control2);
2873
2874 /* Disable parallel detection of HiG */
2875 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2876 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2877 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002879 }
2880}
2881
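/* Configure the autoneg blocks according to the requested mode: CL37,
 * signal-detect autodetection, BAM/TetonII and CL73 (with the CL73
 * advertisement set to 10G KX4 on XGXS or 1G KX on SerDes).
 */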
2882static void bnx2x_set_autoneg(struct bnx2x *bp)
2883{
2884 u32 reg_val;
2885
2886 /* CL37 Autoneg */
2887 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2888 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2889 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2890 (bp->autoneg & AUTONEG_CL37)) {
2891 /* CL37 Autoneg Enabled */
2892 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2893 } else {
2894 /* CL37 Autoneg Disabled */
2895 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2896 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2897 }
2898 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2899
2900 /* Enable/Disable Autodetection */
2901 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2902 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2903 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2904
2905 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2906 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2907 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2908 } else {
2909 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2910 }
2911 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2912
2913 /* Enable TetonII and BAM autoneg */
2914 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2915 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2916 &reg_val);
2917 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2918 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2919 /* Enable BAM aneg Mode and TetonII aneg Mode */
2920 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2921 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2922 } else {
2923 /* TetonII and BAM Autoneg Disabled */
2924 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2925 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2926 }
2927 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2928 reg_val);
2929
2930 /* Enable Clause 73 Aneg */
2931 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2932 (bp->autoneg & AUTONEG_CL73)) {
2933 /* Enable BAM Station Manager */
2934 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2935 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2936 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2937 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2939
2940 /* Merge CL73 and CL37 aneg resolution */
2941 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2942 &reg_val);
2943 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2944 (reg_val |
2945 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2946
2947 /* Set the CL73 AN speed */
2948 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2949 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2950		/* In SerDes we support only 1G.
2951		   In XGXS we support 10G KX4
2952		   but we currently do not support KR */
2953 if (bp->phy_flags & PHY_XGXS_FLAG) {
2954 DP(NETIF_MSG_LINK, "XGXS\n");
2955 /* 10G KX4 */
2956 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2957 } else {
2958 DP(NETIF_MSG_LINK, "SerDes\n");
2959 /* 1000M KX */
2960 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2961 }
2962 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2963
2964 /* CL73 Autoneg Enabled */
2965 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2966 } else {
2967 /* CL73 Autoneg Disabled */
2968 reg_val = 0;
2969 }
2970 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2971 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2972}
2973
2974/* program SerDes, forced speed */
2975static void bnx2x_program_serdes(struct bnx2x *bp)
2976{
2977 u32 reg_val;
2978
2979 /* program duplex, disable autoneg */
2980 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2981 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2982 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2983 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2984 if (bp->req_duplex == DUPLEX_FULL)
2985 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2986 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2987
2988 /* program speed
2989 - needed only if the speed is greater than 1G (2.5G or 10G) */
2990 if (bp->req_line_speed > SPEED_1000) {
2991 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2992 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2993 /* clearing the speed value before setting the right speed */
2994 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2995 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2996 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2997 if (bp->req_line_speed == SPEED_10000)
2998 reg_val |=
2999 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
3000 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
3001 }
3002}
3003
3004static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3005{
3006 u32 val = 0;
3007
3008 /* configure the 48 bits for BAM AN */
3009 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3010
3011 /* set extended capabilities */
Eliezer Tamirf1410642008-02-28 11:51:50 -08003012 if (bp->advertising & ADVERTISED_2500baseX_Full)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003013 val |= MDIO_OVER_1G_UP1_2_5G;
3014 if (bp->advertising & ADVERTISED_10000baseT_Full)
3015 val |= MDIO_OVER_1G_UP1_10G;
3016 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3017
3018 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3019}
3020
3021static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3022{
3023 u32 an_adv;
3024
3025 /* for AN, we are always publishing full duplex */
3026 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3027
Eliezer Tamirf1410642008-02-28 11:51:50 -08003028 /* resolve pause mode and advertisement
3029 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3030 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3031 switch (bp->req_flow_ctrl) {
3032 case FLOW_CTRL_AUTO:
3033 if (bp->dev->mtu <= 4500) {
3034 an_adv |=
3035 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3036 bp->advertising |= (ADVERTISED_Pause |
3037 ADVERTISED_Asym_Pause);
3038 } else {
3039 an_adv |=
3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3041 bp->advertising |= ADVERTISED_Asym_Pause;
3042 }
3043 break;
3044
3045 case FLOW_CTRL_TX:
3046 an_adv |=
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3048 bp->advertising |= ADVERTISED_Asym_Pause;
3049 break;
3050
3051 case FLOW_CTRL_RX:
3052 if (bp->dev->mtu <= 4500) {
3053 an_adv |=
3054 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3055 bp->advertising |= (ADVERTISED_Pause |
3056 ADVERTISED_Asym_Pause);
3057 } else {
3058 an_adv |=
3059 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3060 bp->advertising &= ~(ADVERTISED_Pause |
3061 ADVERTISED_Asym_Pause);
3062 }
3063 break;
3064
3065 case FLOW_CTRL_BOTH:
3066 if (bp->dev->mtu <= 4500) {
3067 an_adv |=
3068 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3069 bp->advertising |= (ADVERTISED_Pause |
3070 ADVERTISED_Asym_Pause);
3071 } else {
3072 an_adv |=
3073 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3074 bp->advertising |= ADVERTISED_Asym_Pause;
3075 }
3076 break;
3077
3078 case FLOW_CTRL_NONE:
3079 default:
3080 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3081 bp->advertising &= ~(ADVERTISED_Pause |
3082 ADVERTISED_Asym_Pause);
3083 break;
3084 }
3085 } else { /* forced mode */
3086 switch (bp->req_flow_ctrl) {
3087 case FLOW_CTRL_AUTO:
3088 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3089 " req_autoneg 0x%x\n",
3090 bp->req_flow_ctrl, bp->req_autoneg);
3091 break;
3092
3093 case FLOW_CTRL_TX:
3094 an_adv |=
3095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3096 bp->advertising |= ADVERTISED_Asym_Pause;
3097 break;
3098
3099 case FLOW_CTRL_RX:
3100 case FLOW_CTRL_BOTH:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3102 bp->advertising |= (ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3104 break;
3105
3106 case FLOW_CTRL_NONE:
3107 default:
3108 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3109 bp->advertising &= ~(ADVERTISED_Pause |
3110 ADVERTISED_Asym_Pause);
3111 break;
3112 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003113 }
3114
3115 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3116 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3117}
3118
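/* Enable and restart autoneg: clause 73 if AUTONEG_CL73 is set,
 * otherwise BAM/CL37 via the combo IEEE0 MII control register.
 */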
3119static void bnx2x_restart_autoneg(struct bnx2x *bp)
3120{
3121 if (bp->autoneg & AUTONEG_CL73) {
3122 /* enable and restart clause 73 aneg */
3123 u32 an_ctrl;
3124
3125 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3126 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3127 &an_ctrl);
3128 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3129 (an_ctrl |
3130 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3131 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3132
3133 } else {
3134 /* Enable and restart BAM/CL37 aneg */
3135 u32 mii_control;
3136
3137 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3138 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3139 &mii_control);
3140 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3141 (mii_control |
3142 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3143 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3144 }
3145}
3146
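/* Configure the unicore for SGMII (slave) operation: either force the
 * requested speed/duplex or enable and restart autoneg.
 */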
3147static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3148{
3149 u32 control1;
3150
3151 /* in SGMII mode, the unicore is always slave */
3152 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3153 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3154 &control1);
3155 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3156 /* set sgmii mode (and not fiber) */
3157 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3158 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3159 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3160 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3161 control1);
3162
3163 /* if forced speed */
3164 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3165 /* set speed, disable autoneg */
3166 u32 mii_control;
3167
3168 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3169 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3170 &mii_control);
3171 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3172 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3173 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3174
3175 switch (bp->req_line_speed) {
3176 case SPEED_100:
3177 mii_control |=
3178 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3179 break;
3180 case SPEED_1000:
3181 mii_control |=
3182 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3183 break;
3184 case SPEED_10:
3185 /* there is nothing to set for 10M */
3186 break;
3187 default:
3188 /* invalid speed for SGMII */
3189 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3190 bp->req_line_speed);
3191 break;
3192 }
3193
3194 /* setting the full duplex */
3195 if (bp->req_duplex == DUPLEX_FULL)
3196 mii_control |=
3197 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3198 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3199 mii_control);
3200
3201 } else { /* AN mode */
3202 /* enable and restart AN */
3203 bnx2x_restart_autoneg(bp);
3204 }
3205}
3206
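/* Unmask the NIG link interrupts relevant for this port (XGXS 10G/1G or
 * SerDes link status, plus the external PHY MI interrupt when an
 * external PHY is connected).
 */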
3207static void bnx2x_link_int_enable(struct bnx2x *bp)
3208{
3209 int port = bp->port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08003210 u32 ext_phy_type;
3211 u32 mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003212
3213 /* setting the status to report on link up
3214 for either XGXS or SerDes */
3215 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
Eliezer Tamirf1410642008-02-28 11:51:50 -08003216 (NIG_STATUS_XGXS0_LINK10G |
3217 NIG_STATUS_XGXS0_LINK_STATUS |
3218 NIG_STATUS_SERDES0_LINK_STATUS));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003219
3220 if (bp->phy_flags & PHY_XGXS_FLAG) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08003221 mask = (NIG_MASK_XGXS0_LINK10G |
3222 NIG_MASK_XGXS0_LINK_STATUS);
3223 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3224 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3225 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3226 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3227 (ext_phy_type !=
3228 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3229 mask |= NIG_MASK_MI_INT;
3230 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3231 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003232
3233 } else { /* SerDes */
Eliezer Tamirf1410642008-02-28 11:51:50 -08003234 mask = NIG_MASK_SERDES0_LINK_STATUS;
3235 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3236 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3237 if ((ext_phy_type !=
3238 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3239 (ext_phy_type !=
3240 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3241 mask |= NIG_MASK_MI_INT;
3242 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3243 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003244 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08003245 bnx2x_bits_en(bp,
3246 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3247 mask);
3248 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3249 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3250 " 10G %x, XGXS_LINK %x\n", port,
3251 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3252 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3253 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3255 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3256 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3258 );
3259}
3260
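/* Boot the BCM8072 microcode from its external SPI ROM (set
 * ser_boot_ctl, reset the internal microprocessor, wait for the
 * download) and print the resulting firmware version.
 */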
3261static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3262{
3263 u32 ext_phy_addr = ((bp->ext_phy_config &
3264 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3266 u32 fw_ver1, fw_ver2;
3267
3268 /* Need to wait 200ms after reset */
3269 msleep(200);
3270 /* Boot port from external ROM
3271 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3272 */
3273 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3274 EXT_PHY_KR_PMA_PMD_DEVAD,
3275 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3276
3277 /* Reset internal microprocessor */
3278 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3279 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3280 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3281 /* set micro reset = 0 */
3282 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3283 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3284 EXT_PHY_KR_ROM_MICRO_RESET);
3285 /* Reset internal microprocessor */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3288 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3289 /* wait for 100ms for code download via SPI port */
3290 msleep(100);
3291
3292 /* Clear ser_boot_ctl bit */
3293 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3296 /* Wait 100ms */
3297 msleep(100);
3298
3299 /* Print the PHY FW version */
3300 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3301 EXT_PHY_KR_PMA_PMD_DEVAD,
3302 0xca19, &fw_ver1);
3303 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3304 EXT_PHY_KR_PMA_PMD_DEVAD,
3305 0xca1a, &fw_ver2);
3306 DP(NETIF_MSG_LINK,
3307 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3308}
3309
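/* Force the BCM8072 PMA/PMD to KR/KX operation and disable clause 73
 * autoneg.
 */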
3310static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3311{
3312 u32 ext_phy_addr = ((bp->ext_phy_config &
3313 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3315
3316 /* Force KR or KX */
3317 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3318 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3319 0x2040);
3320 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3321 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3322 0x000b);
3323 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3324 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3325 0x0000);
3326 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3327 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3328 0x0000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003329}
3330
3331static void bnx2x_ext_phy_init(struct bnx2x *bp)
3332{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003333 u32 ext_phy_type;
3334 u32 ext_phy_addr;
Eliezer Tamirf1410642008-02-28 11:51:50 -08003335 u32 cnt;
3336 u32 ctrl;
3337 u32 val = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003338
3339 if (bp->phy_flags & PHY_XGXS_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003340 ext_phy_addr = ((bp->ext_phy_config &
3341 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3343
3344 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003345		/* Make sure that the soft reset is off (except for the 8072:
3346 * due to the lock, it will be done inside the specific
3347 * handling)
3348 */
3349 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3350 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3353			/* Wait for soft reset to get cleared up to 1 sec */
3354 for (cnt = 0; cnt < 1000; cnt++) {
3355 bnx2x_mdio45_read(bp, ext_phy_addr,
3356 EXT_PHY_OPT_PMA_PMD_DEVAD,
3357 EXT_PHY_OPT_CNTL, &ctrl);
3358 if (!(ctrl & (1<<15)))
3359 break;
3360 msleep(1);
3361 }
3362 DP(NETIF_MSG_LINK,
3363 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3364 }
3365
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003366 switch (ext_phy_type) {
3367 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3368 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3369 break;
3370
3371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3372 DP(NETIF_MSG_LINK, "XGXS 8705\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003373
Eliezer Tamirf1410642008-02-28 11:51:50 -08003374 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3375 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003376 EXT_PHY_OPT_PMD_MISC_CNTL,
3377 0x8288);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003378 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003380 EXT_PHY_OPT_PHY_IDENTIFIER,
3381 0x7fbf);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003382 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3383 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003384 EXT_PHY_OPT_CMU_PLL_BYPASS,
3385 0x0100);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003386 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3387 EXT_PHY_OPT_WIS_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003388 EXT_PHY_OPT_LASI_CNTL, 0x1);
3389 break;
3390
3391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3392 DP(NETIF_MSG_LINK, "XGXS 8706\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003393
Eliezer Tamirf1410642008-02-28 11:51:50 -08003394 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3395 /* Force speed */
3396 if (bp->req_line_speed == SPEED_10000) {
3397 DP(NETIF_MSG_LINK,
3398 "XGXS 8706 force 10Gbps\n");
3399 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3400 EXT_PHY_OPT_PMA_PMD_DEVAD,
3401 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3402 0x400);
3403 } else {
3404 /* Force 1Gbps */
3405 DP(NETIF_MSG_LINK,
3406 "XGXS 8706 force 1Gbps\n");
3407
3408 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3409 EXT_PHY_OPT_PMA_PMD_DEVAD,
3410 EXT_PHY_OPT_CNTL,
3411 0x0040);
3412
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_CNTL2,
3416 0x000D);
3417 }
3418
3419 /* Enable LASI */
3420 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421 EXT_PHY_OPT_PMA_PMD_DEVAD,
3422 EXT_PHY_OPT_LASI_CNTL,
3423 0x1);
3424 } else {
3425 /* AUTONEG */
3426 /* Allow CL37 through CL73 */
3427 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3428 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3429 EXT_PHY_AUTO_NEG_DEVAD,
3430 EXT_PHY_OPT_AN_CL37_CL73,
3431 0x040c);
3432
3433				/* Enable Full-Duplex advertisement on CL37 */
3434 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3435 EXT_PHY_AUTO_NEG_DEVAD,
3436 EXT_PHY_OPT_AN_CL37_FD,
3437 0x0020);
3438 /* Enable CL37 AN */
3439 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3440 EXT_PHY_AUTO_NEG_DEVAD,
3441 EXT_PHY_OPT_AN_CL37_AN,
3442 0x1000);
3443 /* Advertise 10G/1G support */
3444 if (bp->advertising &
3445 ADVERTISED_1000baseT_Full)
3446 val = (1<<5);
3447 if (bp->advertising &
3448 ADVERTISED_10000baseT_Full)
3449 val |= (1<<7);
3450
3451 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3452 EXT_PHY_AUTO_NEG_DEVAD,
3453 EXT_PHY_OPT_AN_ADV, val);
3454 /* Enable LASI */
3455 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3456 EXT_PHY_OPT_PMA_PMD_DEVAD,
3457 EXT_PHY_OPT_LASI_CNTL,
3458 0x1);
3459
3460 /* Enable clause 73 AN */
3461 bnx2x_mdio45_write(bp, ext_phy_addr,
3462 EXT_PHY_AUTO_NEG_DEVAD,
3463 EXT_PHY_OPT_CNTL,
3464 0x1200);
3465 }
3466 break;
3467
3468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3469 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3470			/* Wait for soft reset to get cleared up to 1 sec */
3471 for (cnt = 0; cnt < 1000; cnt++) {
3472 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3473 ext_phy_addr,
3474 EXT_PHY_OPT_PMA_PMD_DEVAD,
3475 EXT_PHY_OPT_CNTL, &ctrl);
3476 if (!(ctrl & (1<<15)))
3477 break;
3478 msleep(1);
3479 }
3480 DP(NETIF_MSG_LINK,
3481 "8072 control reg 0x%x (after %d ms)\n",
3482 ctrl, cnt);
3483
3484 bnx2x_bcm8072_external_rom_boot(bp);
3485			DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3486
3487 /* enable LASI */
3488 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3489 ext_phy_addr,
3490 EXT_PHY_KR_PMA_PMD_DEVAD,
3491 0x9000, 0x0400);
3492 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3493 ext_phy_addr,
3494 EXT_PHY_KR_PMA_PMD_DEVAD,
3495 EXT_PHY_KR_LASI_CNTL, 0x0004);
3496
3497 /* If this is forced speed, set to KR or KX
3498			 * (all others are not supported)
3499 */
3500 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3501 if (bp->req_line_speed == SPEED_10000) {
3502 bnx2x_bcm8072_force_10G(bp);
3503 DP(NETIF_MSG_LINK,
3504 "Forced speed 10G on 8072\n");
3505 /* unlock */
3506 bnx2x_hw_unlock(bp,
3507 HW_LOCK_RESOURCE_8072_MDIO);
3508 break;
3509 } else
3510 val = (1<<5);
3511 } else {
3512
3513 /* Advertise 10G/1G support */
3514 if (bp->advertising &
3515 ADVERTISED_1000baseT_Full)
3516 val = (1<<5);
3517 if (bp->advertising &
3518 ADVERTISED_10000baseT_Full)
3519 val |= (1<<7);
3520 }
3521 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3522 ext_phy_addr,
3523 EXT_PHY_KR_AUTO_NEG_DEVAD,
3524 0x11, val);
3525 /* Add support for CL37 ( passive mode ) I */
3526 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3527 ext_phy_addr,
3528 EXT_PHY_KR_AUTO_NEG_DEVAD,
3529 0x8370, 0x040c);
3530 /* Add support for CL37 ( passive mode ) II */
3531 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3532 ext_phy_addr,
3533 EXT_PHY_KR_AUTO_NEG_DEVAD,
3534 0xffe4, 0x20);
3535 /* Add support for CL37 ( passive mode ) III */
3536 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3537 ext_phy_addr,
3538 EXT_PHY_KR_AUTO_NEG_DEVAD,
3539 0xffe0, 0x1000);
3540 /* Restart autoneg */
3541 msleep(500);
3542 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3543 ext_phy_addr,
3544 EXT_PHY_KR_AUTO_NEG_DEVAD,
3545 EXT_PHY_KR_CTRL, 0x1200);
3546 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3547 "1G %ssupported 10G %ssupported\n",
3548 (val & (1<<5)) ? "" : "not ",
3549 (val & (1<<7)) ? "" : "not ");
3550
3551 /* unlock */
3552 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3553 break;
3554
3555 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3556 DP(NETIF_MSG_LINK,
3557 "Setting the SFX7101 LASI indication\n");
3558 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3559 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003560 EXT_PHY_OPT_LASI_CNTL, 0x1);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003561 DP(NETIF_MSG_LINK,
3562 "Setting the SFX7101 LED to blink on traffic\n");
3563 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3564 EXT_PHY_OPT_PMA_PMD_DEVAD,
3565 0xC007, (1<<3));
3566
3567			/* read-modify-write pause advertising */
3568 bnx2x_mdio45_read(bp, ext_phy_addr,
3569 EXT_PHY_KR_AUTO_NEG_DEVAD,
3570 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3571 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3572 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3573 if (bp->advertising & ADVERTISED_Pause)
3574 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3575
3576 if (bp->advertising & ADVERTISED_Asym_Pause) {
3577 val |=
3578 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3579 }
3580			DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3581 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3582 EXT_PHY_KR_AUTO_NEG_DEVAD,
3583 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3584 /* Restart autoneg */
3585 bnx2x_mdio45_read(bp, ext_phy_addr,
3586 EXT_PHY_KR_AUTO_NEG_DEVAD,
3587 EXT_PHY_KR_CTRL, &val);
3588 val |= 0x200;
3589 bnx2x_mdio45_write(bp, ext_phy_addr,
3590 EXT_PHY_KR_AUTO_NEG_DEVAD,
3591 EXT_PHY_KR_CTRL, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003592 break;
3593
3594 default:
Eliezer Tamirf1410642008-02-28 11:51:50 -08003595 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3596 bp->ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003597 break;
3598 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003599
3600 } else { /* SerDes */
Eliezer Tamirf1410642008-02-28 11:51:50 -08003601/* ext_phy_addr = ((bp->ext_phy_config &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003602 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3604*/
3605 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3606 switch (ext_phy_type) {
3607 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3608 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3609 break;
3610
3611 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3612 DP(NETIF_MSG_LINK, "SerDes 5482\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003613 break;
3614
3615 default:
3616 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3617 bp->ext_phy_config);
3618 break;
3619 }
3620 }
3621}
3622
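/* Reset the external PHY: pulse GPIO 1 for 1 ms (skipped on the
 * T1002G/T1003G boards) and then issue the type specific soft reset
 * through the MDIO control register where applicable.
 */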
3623static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3624{
3625 u32 ext_phy_type;
Eliezer Tamirf1410642008-02-28 11:51:50 -08003626 u32 ext_phy_addr = ((bp->ext_phy_config &
3627 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3629 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3630
3631	/* The PHY reset is controlled by GPIO 1
3632 * Give it 1ms of reset pulse
3633 */
3634 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3635 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3636 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3637 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3638 msleep(1);
3639 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3640 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3641 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003642
3643 if (bp->phy_flags & PHY_XGXS_FLAG) {
3644 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3645 switch (ext_phy_type) {
3646 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3647 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3648 break;
3649
3650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
Eliezer Tamirf1410642008-02-28 11:51:50 -08003652 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3653 bnx2x_mdio45_write(bp, ext_phy_addr,
3654 EXT_PHY_OPT_PMA_PMD_DEVAD,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003655 EXT_PHY_OPT_CNTL, 0xa040);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003656 break;
3657
3658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3659 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3660 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3661 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3662 ext_phy_addr,
3663 EXT_PHY_KR_PMA_PMD_DEVAD,
3664 0, 1<<15);
3665 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3666 break;
3667
3668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3669 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003670 break;
3671
3672 default:
3673 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3674 bp->ext_phy_config);
3675 break;
3676 }
3677
3678 } else { /* SerDes */
3679 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3680 switch (ext_phy_type) {
3681 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3682 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3683 break;
3684
3685 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3686 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3687 break;
3688
3689 default:
3690 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3691 bp->ext_phy_config);
3692 break;
3693 }
3694 }
3695}
3696
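/* Bring the link up from scratch: mask the link attentions, reset the
 * external PHY and the SerDes/XGXS unicore, select SGMII or fiber/10G
 * operation based on the requested speed, program forced speed or
 * autoneg, then init the external PHY and re-enable the link interrupt.
 */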
3697static void bnx2x_link_initialize(struct bnx2x *bp)
3698{
3699 int port = bp->port;
3700
3701 /* disable attentions */
3702 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3703 (NIG_MASK_XGXS0_LINK_STATUS |
3704 NIG_MASK_XGXS0_LINK10G |
3705 NIG_MASK_SERDES0_LINK_STATUS |
3706 NIG_MASK_MI_INT));
3707
Eliezer Tamirf1410642008-02-28 11:51:50 -08003708 /* Activate the external PHY */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003709 bnx2x_ext_phy_reset(bp);
3710
3711 bnx2x_set_aer_mmd(bp);
3712
3713 if (bp->phy_flags & PHY_XGXS_FLAG)
3714 bnx2x_set_master_ln(bp);
3715
3716	/* reset the SerDes and wait for the reset bit to return low */
3717 bnx2x_reset_unicore(bp);
3718
3719 bnx2x_set_aer_mmd(bp);
3720
3721 /* setting the masterLn_def again after the reset */
3722 if (bp->phy_flags & PHY_XGXS_FLAG) {
3723 bnx2x_set_master_ln(bp);
3724 bnx2x_set_swap_lanes(bp);
3725 }
3726
3727 /* Set Parallel Detect */
3728 if (bp->req_autoneg & AUTONEG_SPEED)
3729 bnx2x_set_parallel_detection(bp);
3730
3731 if (bp->phy_flags & PHY_XGXS_FLAG) {
3732 if (bp->req_line_speed &&
3733 bp->req_line_speed < SPEED_1000) {
3734 bp->phy_flags |= PHY_SGMII_FLAG;
3735 } else {
3736 bp->phy_flags &= ~PHY_SGMII_FLAG;
3737 }
3738 }
3739
3740 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3741 u16 bank, rx_eq;
3742
3743 rx_eq = ((bp->serdes_config &
3744 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3745 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3746
3747 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3748 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3749 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3750 MDIO_SET_REG_BANK(bp, bank);
3751 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3752 ((rx_eq &
3753 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3754 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3755 }
3756
3757 /* forced speed requested? */
3758 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3759 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3760
3761 /* disable autoneg */
3762 bnx2x_set_autoneg(bp);
3763
3764 /* program speed and duplex */
3765 bnx2x_program_serdes(bp);
3766
3767 } else { /* AN_mode */
3768 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3769
3770 /* AN enabled */
3771 bnx2x_set_brcm_cl37_advertisment(bp);
3772
Eliezer Tamirc14423f2008-02-28 11:49:42 -08003773 /* program duplex & pause advertisement (for aneg) */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003774 bnx2x_set_ieee_aneg_advertisment(bp);
3775
3776 /* enable autoneg */
3777 bnx2x_set_autoneg(bp);
3778
Eliezer Tamirc14423f2008-02-28 11:49:42 -08003779 /* enable and restart AN */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003780 bnx2x_restart_autoneg(bp);
3781 }
3782
3783 } else { /* SGMII mode */
3784 DP(NETIF_MSG_LINK, "SGMII\n");
3785
3786 bnx2x_initialize_sgmii_process(bp);
3787 }
3788
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003789 /* init ext phy and enable link state int */
3790 bnx2x_ext_phy_init(bp);
Eliezer Tamirf1410642008-02-28 11:51:50 -08003791
3792 /* enable the interrupt */
3793 bnx2x_link_int_enable(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003794}
3795
3796static void bnx2x_phy_deassert(struct bnx2x *bp)
3797{
3798 int port = bp->port;
3799 u32 val;
3800
3801 if (bp->phy_flags & PHY_XGXS_FLAG) {
3802 DP(NETIF_MSG_LINK, "XGXS\n");
3803 val = XGXS_RESET_BITS;
3804
3805 } else { /* SerDes */
3806 DP(NETIF_MSG_LINK, "SerDes\n");
3807 val = SERDES_RESET_BITS;
3808 }
3809
3810 val = val << (port*16);
3811
3812 /* reset and unreset the SerDes/XGXS */
3813 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3814 msleep(5);
3815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3816}
3817
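/* On FPGA and emulation platforms there is no real PHY, so the link is
 * simply reported up at 10G full duplex over the EMAC/BMAC.  On real
 * silicon the SerDes/XGXS is taken out of reset and the full link
 * bring-up is done in bnx2x_link_initialize().
 */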
3818static int bnx2x_phy_init(struct bnx2x *bp)
3819{
3820 DP(NETIF_MSG_LINK, "started\n");
3821 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3822 bp->phy_flags |= PHY_EMAC_FLAG;
3823 bp->link_up = 1;
3824 bp->line_speed = SPEED_10000;
3825 bp->duplex = DUPLEX_FULL;
3826 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3827 bnx2x_emac_enable(bp);
3828 bnx2x_link_report(bp);
3829 return 0;
3830
3831 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3832 bp->phy_flags |= PHY_BMAC_FLAG;
3833 bp->link_up = 1;
3834 bp->line_speed = SPEED_10000;
3835 bp->duplex = DUPLEX_FULL;
3836 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3837 bnx2x_bmac_enable(bp, 0);
3838 bnx2x_link_report(bp);
3839 return 0;
3840
3841 } else {
3842 bnx2x_phy_deassert(bp);
3843 bnx2x_link_initialize(bp);
3844 }
3845
3846 return 0;
3847}
3848
3849static void bnx2x_link_reset(struct bnx2x *bp)
3850{
3851 int port = bp->port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08003852 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3853
3854 /* update shared memory */
3855 bp->link_status = 0;
3856 bnx2x_update_mng(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003857
3858 /* disable attentions */
3859 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3860 (NIG_MASK_XGXS0_LINK_STATUS |
3861 NIG_MASK_XGXS0_LINK10G |
3862 NIG_MASK_SERDES0_LINK_STATUS |
3863 NIG_MASK_MI_INT));
3864
Eliezer Tamirf1410642008-02-28 11:51:50 -08003865 /* activate nig drain */
3866 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3867
3868 /* disable nig egress interface */
3869 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3870 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3871
3872 /* Stop BigMac rx */
3873 bnx2x_bmac_rx_disable(bp);
3874
3875 /* disable emac */
3876 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3877
3878 msleep(10);
3879
3880	/* The PHY reset is controlled by GPIO 1
3881 * Hold it as output low
3882 */
3883 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3884 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3885 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3886 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3887 DP(NETIF_MSG_LINK, "reset external PHY\n");
3888 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003889
3890 /* reset the SerDes/XGXS */
3891 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3892 (0x1ff << (port*16)));
3893
Eliezer Tamirf1410642008-02-28 11:51:50 -08003894 /* reset BigMac */
3895 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3896 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3897
3898 /* disable nig ingress interface */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003899 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003900 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003901
Eliezer Tamirf1410642008-02-28 11:51:50 -08003902 /* set link down */
3903 bp->link_up = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003904}
3905
3906#ifdef BNX2X_XGXS_LB
3907static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3908{
3909 int port = bp->port;
3910
3911 if (is_10g) {
3912 u32 md_devad;
3913
3914 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3915
3916 /* change the uni_phy_addr in the nig */
3917 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3918 &md_devad);
3919 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3920
3921 /* change the aer mmd */
3922 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3923 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3924
3925 /* config combo IEEE0 control reg for loopback */
3926 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3927 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3928 0x6041);
3929
3930 /* set aer mmd back */
3931 bnx2x_set_aer_mmd(bp);
3932
3933 /* and md_devad */
3934 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3935
3936 } else {
3937 u32 mii_control;
3938
3939 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3940
3941 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3942 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3943 &mii_control);
3944 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3945 (mii_control |
3946 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3947 }
3948}
3949#endif
3950
3951/* end of PHY/MAC */
3952
3953/* slow path */
3954
3955/*
3956 * General service functions
3957 */
3958
3959/* the slow path queue is odd since completions arrive on the fastpath ring */
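/* Post one slow path element (ramrod) on the SPQ: fill the next BD with
 * the command, the port-encoded CID and the 64-bit data under spq_lock,
 * then advance the XSTORM producer.  'common' marks device-wide ramrods
 * rather than per-connection ones.
 */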
3960static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3961 u32 data_hi, u32 data_lo, int common)
3962{
3963 int port = bp->port;
3964
3965 DP(NETIF_MSG_TIMER,
Eliezer Tamirc14423f2008-02-28 11:49:42 -08003966 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003967 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3968 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3969 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3970
3971#ifdef BNX2X_STOP_ON_ERROR
3972 if (unlikely(bp->panic))
3973 return -EIO;
3974#endif
3975
3976 spin_lock(&bp->spq_lock);
3977
3978 if (!bp->spq_left) {
3979 BNX2X_ERR("BUG! SPQ ring full!\n");
3980 spin_unlock(&bp->spq_lock);
3981 bnx2x_panic();
3982 return -EBUSY;
3983 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08003984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003985	/* CID needs port number to be encoded in it */
3986 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3987 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3988 HW_CID(bp, cid)));
3989 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3990 if (common)
3991 bp->spq_prod_bd->hdr.type |=
3992 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3993
3994 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3995 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3996
3997 bp->spq_left--;
3998
3999 if (bp->spq_prod_bd == bp->spq_last_bd) {
4000 bp->spq_prod_bd = bp->spq;
4001 bp->spq_prod_idx = 0;
4002 DP(NETIF_MSG_TIMER, "end of spq\n");
4003
4004 } else {
4005 bp->spq_prod_bd++;
4006 bp->spq_prod_idx++;
4007 }
4008
4009 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4010 bp->spq_prod_idx);
4011
4012 spin_unlock(&bp->spq_lock);
4013 return 0;
4014}
4015
4016/* acquire split MCP access lock register */
4017static int bnx2x_lock_alr(struct bnx2x *bp)
4018{
4019 int rc = 0;
4020 u32 i, j, val;
4021
4022 might_sleep();
4023 i = 100;
4024 for (j = 0; j < i*10; j++) {
4025 val = (1UL << 31);
4026 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4027 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4028 if (val & (1L << 31))
4029 break;
4030
4031 msleep(5);
4032 }
4033
4034 if (!(val & (1L << 31))) {
4035		BNX2X_ERR("Cannot acquire MCP access lock register\n");
4036
4037 rc = -EBUSY;
4038 }
4039
4040 return rc;
4041}
4042
4043/* Release split MCP access lock register */
4044static void bnx2x_unlock_alr(struct bnx2x *bp)
4045{
4046 u32 val = 0;
4047
4048 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4049}
4050
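/* Compare the cached default status block indices with the ones last
 * written by the chip and return a bitmask of what changed:
 * bit 0 - attention bits, bits 1-4 - the C/U/X/T storm indices.
 */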
4051static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4052{
4053 struct host_def_status_block *def_sb = bp->def_status_blk;
4054 u16 rc = 0;
4055
4056 barrier(); /* status block is written to by the chip */
4057
4058 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4059 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4060 rc |= 1;
4061 }
4062 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4063 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4064 rc |= 2;
4065 }
4066 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4067 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4068 rc |= 4;
4069 }
4070 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4071 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4072 rc |= 8;
4073 }
4074 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4075 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4076 rc |= 16;
4077 }
4078 return rc;
4079}
4080
4081/*
4082 * slow path service functions
4083 */
4084
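/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in attn_state, service the hard-wired sources (NIG link change,
 * SW timer, GPIOs, general attentions) and finally report the asserted
 * set to the IGU.  Around the link update the NIG interrupt mask is
 * saved, cleared and later restored.
 */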
4085static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4086{
4087 int port = bp->port;
4088 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4089 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4090 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004091 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4092 NIG_REG_MASK_INTERRUPT_PORT0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004093
4094 if (~bp->aeu_mask & (asserted & 0xff))
4095 BNX2X_ERR("IGU ERROR\n");
4096 if (bp->attn_state & asserted)
4097 BNX2X_ERR("IGU ERROR\n");
4098
4099 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4100 bp->aeu_mask, asserted);
4101 bp->aeu_mask &= ~(asserted & 0xff);
4102 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4103
4104 REG_WR(bp, aeu_addr, bp->aeu_mask);
4105
4106 bp->attn_state |= asserted;
4107
4108 if (asserted & ATTN_HARD_WIRED_MASK) {
4109 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004110
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004111 /* save nig interrupt mask */
4112 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
4113 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004114
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004115 bnx2x_link_update(bp);
4116
4117 /* handle unicore attn? */
4118 }
4119 if (asserted & ATTN_SW_TIMER_4_FUNC)
4120 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4121
4122 if (asserted & GPIO_2_FUNC)
4123 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4124
4125 if (asserted & GPIO_3_FUNC)
4126 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4127
4128 if (asserted & GPIO_4_FUNC)
4129 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4130
4131 if (port == 0) {
4132 if (asserted & ATTN_GENERAL_ATTN_1) {
4133 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4135 }
4136 if (asserted & ATTN_GENERAL_ATTN_2) {
4137 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4139 }
4140 if (asserted & ATTN_GENERAL_ATTN_3) {
4141 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4143 }
4144 } else {
4145 if (asserted & ATTN_GENERAL_ATTN_4) {
4146 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4148 }
4149 if (asserted & ATTN_GENERAL_ATTN_5) {
4150 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4152 }
4153 if (asserted & ATTN_GENERAL_ATTN_6) {
4154 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4155 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4156 }
4157 }
4158
4159 } /* if hardwired */
4160
4161 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4162 asserted, BAR_IGU_INTMEM + igu_addr);
4163 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4164
4165 /* now set back the mask */
4166 if (asserted & ATTN_NIG_FOR_FUNC)
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004167 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
4168}
4169
4170static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4171{
4172 int port = bp->port;
4173 int reg_offset;
4174 u32 val;
4175
4176 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4177
4178 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4179 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4180
4181 val = REG_RD(bp, reg_offset);
4182 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4183 REG_WR(bp, reg_offset, val);
4184
4185 BNX2X_ERR("SPIO5 hw attention\n");
4186
4187 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4188 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4189 /* Fan failure attention */
4190
4191			/* The PHY reset is controlled by GPIO 1 */
4192 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4193 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4194			/* Low power mode is controlled by GPIO 2 */
4195 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4196 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4197 /* mark the failure */
4198 bp->ext_phy_config &=
4199 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4200 bp->ext_phy_config |=
4201 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4202 SHMEM_WR(bp,
4203 dev_info.port_hw_config[port].
4204 external_phy_config,
4205 bp->ext_phy_config);
4206 /* log the failure */
4207 printk(KERN_ERR PFX "Fan Failure on Network"
4208 " Controller %s has caused the driver to"
4209			       " shut down the card to prevent permanent"
4210 " damage. Please contact Dell Support for"
4211 " assistance\n", bp->dev->name);
4212 break;
4213
4214 default:
4215 break;
4216 }
4217 }
4218}
4219
4220static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4221{
4222 u32 val;
4223
4224 if (attn & BNX2X_DOORQ_ASSERT) {
4225
4226 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4227 BNX2X_ERR("DB hw attention 0x%x\n", val);
4228 /* DORQ discard attention */
4229 if (val & 0x2)
4230 BNX2X_ERR("FATAL error from DORQ\n");
4231 }
4232}
4233
4234static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235{
4236 u32 val;
4237
4238 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4239
4240 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4241 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4242 /* CFC error attention */
4243 if (val & 0x2)
4244 BNX2X_ERR("FATAL error from CFC\n");
4245 }
4246
4247 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4248
4249 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4250 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4251 /* RQ_USDMDP_FIFO_OVERFLOW */
4252 if (val & 0x18000)
4253 BNX2X_ERR("FATAL error from PXP\n");
4254 }
4255}
4256
4257static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4258{
4259 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4260
4261 if (attn & BNX2X_MC_ASSERT_BITS) {
4262
4263 BNX2X_ERR("MC assert!\n");
4264 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4266 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4267 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4268 bnx2x_panic();
4269
4270 } else if (attn & BNX2X_MCP_ASSERT) {
4271
4272 BNX2X_ERR("MCP assert!\n");
4273 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4274 bnx2x_mc_assert(bp);
4275
4276 } else
4277 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4278 }
4279
4280 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4281
4282 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4283 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
4284 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004285}
4286
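/* For every attention group listed in 'deasserted': read the
 * after-invert AEU registers under the ALR lock, hand the group-masked
 * bits to the per-register handlers above, then ack the IGU and unmask
 * the bits in the AEU again.
 */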
4287static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4288{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004289 struct attn_route attn;
4290 struct attn_route group_mask;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004291 int port = bp->port;
4292 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004293 u32 reg_addr;
4294 u32 val;
4295
4296 /* need to take HW lock because MCP or other port might also
4297 try to handle this event */
4298 bnx2x_lock_alr(bp);
4299
4300 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4301 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4302 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4303 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4304 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4305
4306 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4307 if (deasserted & (1 << index)) {
4308 group_mask = bp->attn_group[index];
4309
4310 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4311 (unsigned long long)group_mask.sig[0]);
4312
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004313 bnx2x_attn_int_deasserted3(bp,
4314 attn.sig[3] & group_mask.sig[3]);
4315 bnx2x_attn_int_deasserted1(bp,
4316 attn.sig[1] & group_mask.sig[1]);
4317 bnx2x_attn_int_deasserted2(bp,
4318 attn.sig[2] & group_mask.sig[2]);
4319 bnx2x_attn_int_deasserted0(bp,
4320 attn.sig[0] & group_mask.sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004321
4322 if ((attn.sig[0] & group_mask.sig[0] &
4323 HW_INTERRUT_ASSERT_SET_0) ||
4324 (attn.sig[1] & group_mask.sig[1] &
4325 HW_INTERRUT_ASSERT_SET_1) ||
4326 (attn.sig[2] & group_mask.sig[2] &
4327 HW_INTERRUT_ASSERT_SET_2))
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004328 BNX2X_ERR("FATAL HW block attention"
4329 " set0 0x%x set1 0x%x"
4330 " set2 0x%x\n",
4331 (attn.sig[0] & group_mask.sig[0] &
4332 HW_INTERRUT_ASSERT_SET_0),
4333 (attn.sig[1] & group_mask.sig[1] &
4334 HW_INTERRUT_ASSERT_SET_1),
4335 (attn.sig[2] & group_mask.sig[2] &
4336 HW_INTERRUT_ASSERT_SET_2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004337
4338 if ((attn.sig[0] & group_mask.sig[0] &
4339 HW_PRTY_ASSERT_SET_0) ||
4340 (attn.sig[1] & group_mask.sig[1] &
4341 HW_PRTY_ASSERT_SET_1) ||
4342 (attn.sig[2] & group_mask.sig[2] &
4343 HW_PRTY_ASSERT_SET_2))
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004344 BNX2X_ERR("FATAL HW block parity attention\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004345 }
4346 }
4347
4348 bnx2x_unlock_alr(bp);
4349
4350 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4351
4352 val = ~deasserted;
4353/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4354 val, BAR_IGU_INTMEM + reg_addr); */
4355 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4356
4357 if (bp->aeu_mask & (deasserted & 0xff))
4358 BNX2X_ERR("IGU BUG\n");
4359 if (~bp->attn_state & deasserted)
4360 BNX2X_ERR("IGU BUG\n");
4361
4362 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4363 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4364
4365 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4366 bp->aeu_mask |= (deasserted & 0xff);
4367
4368 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4369 REG_WR(bp, reg_addr, bp->aeu_mask);
4370
4371 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4372 bp->attn_state &= ~deasserted;
4373 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4374}
4375
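/* Split the attention status into newly asserted bits (set in attn_bits
 * but not yet acked or recorded in attn_state) and newly deasserted bits
 * (cleared in attn_bits but still acked and recorded), and handle each
 * set separately.
 */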
4376static void bnx2x_attn_int(struct bnx2x *bp)
4377{
4378 /* read local copy of bits */
4379 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4380 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4381 u32 attn_state = bp->attn_state;
4382
4383 /* look for changed bits */
4384 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4385 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4386
4387 DP(NETIF_MSG_HW,
4388 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4389 attn_bits, attn_ack, asserted, deasserted);
4390
4391 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4392 BNX2X_ERR("bad attention state\n");
4393
4394 /* handle bits that were raised */
4395 if (asserted)
4396 bnx2x_attn_int_asserted(bp, asserted);
4397
4398 if (deasserted)
4399 bnx2x_attn_int_deasserted(bp, deasserted);
4400}
4401
4402static void bnx2x_sp_task(struct work_struct *work)
4403{
4404 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4405 u16 status;
4406
4407 /* Return here if interrupt is disabled */
4408 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004409 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004410 return;
4411 }
4412
4413 status = bnx2x_update_dsb_idx(bp);
4414 if (status == 0)
4415 BNX2X_ERR("spurious slowpath interrupt!\n");
4416
4417 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4418
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004419 /* HW attentions */
4420 if (status & 0x1)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004421 bnx2x_attn_int(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004422
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004423 /* CStorm events: query_stats, port delete ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004424 if (status & 0x2)
4425 bp->stat_pending = 0;
4426
4427 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4428 IGU_INT_NOP, 1);
4429 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4430 IGU_INT_NOP, 1);
4431 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4432 IGU_INT_NOP, 1);
4433 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4434 IGU_INT_NOP, 1);
4435 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4436 IGU_INT_ENABLE, 1);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004437
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004438}
4439
4440static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4441{
4442 struct net_device *dev = dev_instance;
4443 struct bnx2x *bp = netdev_priv(dev);
4444
4445 /* Return here if interrupt is disabled */
4446 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004447 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004448 return IRQ_HANDLED;
4449 }
4450
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08004451 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004452
4453#ifdef BNX2X_STOP_ON_ERROR
4454 if (unlikely(bp->panic))
4455 return IRQ_HANDLED;
4456#endif
4457
4458 schedule_work(&bp->sp_task);
4459
4460 return IRQ_HANDLED;
4461}
4462
4463/* end of slow path */
4464
4465/* Statistics */
4466
4467/****************************************************************************
4468* Macros
4469****************************************************************************/
4470
4471#define UPDATE_STAT(s, t) \
4472 do { \
4473 estats->t += new->s - old->s; \
4474 old->s = new->s; \
4475 } while (0)
4476
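/* The MAC/firmware counters are kept as split 64-bit values (separate
 * hi/lo u32 words), so the macros below propagate carry and borrow by
 * hand.  For example, ADD_64 on sum = 0x1:fffffff0 and add = 0x0:00000020
 * wraps s_lo to 0x00000010, the (s_lo < a_lo) test supplies the carry,
 * and the result is 0x2:00000010.
 */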
4477/* sum[hi:lo] += add[hi:lo] */
4478#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4479 do { \
4480 s_lo += a_lo; \
4481		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
4482 } while (0)
4483
4484/* difference = minuend - subtrahend */
4485#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4486 do { \
4487 if (m_lo < s_lo) { /* underflow */ \
4488 d_hi = m_hi - s_hi; \
4489			if (d_hi > 0) { /* we can borrow 1 */ \
4490 d_hi--; \
4491 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4492 } else { /* m_hi <= s_hi */ \
4493 d_hi = 0; \
4494 d_lo = 0; \
4495 } \
4496 } else { /* m_lo >= s_lo */ \
4497 if (m_hi < s_hi) { \
4498 d_hi = 0; \
4499 d_lo = 0; \
4500 } else { /* m_hi >= s_hi */ \
4501 d_hi = m_hi - s_hi; \
4502 d_lo = m_lo - s_lo; \
4503 } \
4504 } \
4505 } while (0)
4506
4507/* minuend -= subtrahend */
4508#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4509 do { \
4510 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4511 } while (0)
4512
4513#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4514 do { \
4515 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4516 diff.lo, new->s_lo, old->s_lo); \
4517 old->s_hi = new->s_hi; \
4518 old->s_lo = new->s_lo; \
4519 ADD_64(estats->t_hi, diff.hi, \
4520 estats->t_lo, diff.lo); \
4521 } while (0)
4522
4523/* sum[hi:lo] += add */
4524#define ADD_EXTEND_64(s_hi, s_lo, a) \
4525 do { \
4526 s_lo += a; \
4527 s_hi += (s_lo < a) ? 1 : 0; \
4528 } while (0)
4529
4530#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4531 do { \
4532 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4533 } while (0)
4534
4535#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4536 do { \
4537 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4538 old_tclient->s = le32_to_cpu(tclient->s); \
4539 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4540 } while (0)
4541
4542/*
4543 * General service functions
4544 */
4545
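/* Fold a {hi, lo} pair of u32 counters (hi word first in memory) into a
 * long: the full 64-bit value on 64-bit kernels, only the low word on
 * 32-bit ones.
 */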
4546static inline long bnx2x_hilo(u32 *hiref)
4547{
4548 u32 lo = *(hiref + 1);
4549#if (BITS_PER_LONG == 64)
4550 u32 hi = *hiref;
4551
4552 return HILO_U64(hi, lo);
4553#else
4554 return lo;
4555#endif
4556}
4557
4558/*
4559 * Init service functions
4560 */
4561
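/* Build the DMAE command chain used by the statistics flow: optionally
 * one command that copies eth_stats to the MCP mailbox (fw_mb), then,
 * while the link is up, commands that pull the BigMAC or EMAC hardware
 * counters and the per-port NIG counters into the slow path buffer.
 * The last command's completion value (0xffffffff) lands in
 * nig_stats.done, which bnx2x_update_storm_stats() checks before using
 * the data.
 */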
4562static void bnx2x_init_mac_stats(struct bnx2x *bp)
4563{
4564 struct dmae_command *dmae;
4565 int port = bp->port;
4566 int loader_idx = port * 8;
4567 u32 opcode;
4568 u32 mac_addr;
4569
4570 bp->executer_idx = 0;
4571 if (bp->fw_mb) {
4572 /* MCP */
4573 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4575#ifdef __BIG_ENDIAN
4576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4577#else
4578 DMAE_CMD_ENDIANITY_DW_SWAP |
4579#endif
4580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4581
4582 if (bp->link_up)
4583 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4584
4585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4586 dmae->opcode = opcode;
4587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4588 sizeof(u32));
4589 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4590 sizeof(u32));
4591 dmae->dst_addr_lo = bp->fw_mb >> 2;
4592 dmae->dst_addr_hi = 0;
4593 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4594 sizeof(u32)) >> 2;
4595 if (bp->link_up) {
4596 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4597 dmae->comp_addr_hi = 0;
4598 dmae->comp_val = 1;
4599 } else {
4600 dmae->comp_addr_lo = 0;
4601 dmae->comp_addr_hi = 0;
4602 dmae->comp_val = 0;
4603 }
4604 }
4605
4606 if (!bp->link_up) {
4607		/* no need to collect statistics while the link is down */
4608 return;
4609 }
4610
4611 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4612 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4613 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4614#ifdef __BIG_ENDIAN
4615 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4616#else
4617 DMAE_CMD_ENDIANITY_DW_SWAP |
4618#endif
4619 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4620
4621 if (bp->phy_flags & PHY_BMAC_FLAG) {
4622
4623 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4624 NIG_REG_INGRESS_BMAC0_MEM);
4625
4626 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4627 BIGMAC_REGISTER_TX_STAT_GTBYT */
4628 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4629 dmae->opcode = opcode;
4630 dmae->src_addr_lo = (mac_addr +
4631 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4632 dmae->src_addr_hi = 0;
4633 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4634 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4635 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4636 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4637 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4638 dmae->comp_addr_hi = 0;
4639 dmae->comp_val = 1;
4640
4641 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4642 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4643 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4644 dmae->opcode = opcode;
4645 dmae->src_addr_lo = (mac_addr +
4646 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4647 dmae->src_addr_hi = 0;
4648 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4649 offsetof(struct bmac_stats, rx_gr64));
4650 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4651 offsetof(struct bmac_stats, rx_gr64));
4652 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4653 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4654 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4655 dmae->comp_addr_hi = 0;
4656 dmae->comp_val = 1;
4657
4658 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4659
4660 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4661
4662 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4663 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4664 dmae->opcode = opcode;
4665 dmae->src_addr_lo = (mac_addr +
4666 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4667 dmae->src_addr_hi = 0;
4668 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4669 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4670 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4671 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4672 dmae->comp_addr_hi = 0;
4673 dmae->comp_val = 1;
4674
4675 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4676 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4677 dmae->opcode = opcode;
4678 dmae->src_addr_lo = (mac_addr +
4679 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4680 dmae->src_addr_hi = 0;
4681 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4682 offsetof(struct emac_stats,
4683 rx_falsecarriererrors));
4684 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4685 offsetof(struct emac_stats,
4686 rx_falsecarriererrors));
4687 dmae->len = 1;
4688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4689 dmae->comp_addr_hi = 0;
4690 dmae->comp_val = 1;
4691
4692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4694 dmae->opcode = opcode;
4695 dmae->src_addr_lo = (mac_addr +
4696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4697 dmae->src_addr_hi = 0;
4698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4699 offsetof(struct emac_stats,
4700 tx_ifhcoutoctets));
4701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4702 offsetof(struct emac_stats,
4703 tx_ifhcoutoctets));
4704 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4705 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4706 dmae->comp_addr_hi = 0;
4707 dmae->comp_val = 1;
4708 }
4709
4710 /* NIG */
4711 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4712 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4713 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4714 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4715#ifdef __BIG_ENDIAN
4716 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4717#else
4718 DMAE_CMD_ENDIANITY_DW_SWAP |
4719#endif
4720 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4721 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4722 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4723 dmae->src_addr_hi = 0;
4724 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4725 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4726 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4727 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4728 offsetof(struct nig_stats, done));
4729 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4730 offsetof(struct nig_stats, done));
4731 dmae->comp_val = 0xffffffff;
4732}
4733
4734static void bnx2x_init_stats(struct bnx2x *bp)
4735{
4736 int port = bp->port;
4737
4738 bp->stats_state = STATS_STATE_DISABLE;
4739 bp->executer_idx = 0;
4740
4741 bp->old_brb_discard = REG_RD(bp,
4742 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4743
4744 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4745 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4746 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4747
4748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4749 REG_WR(bp, BAR_XSTRORM_INTMEM +
4750 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4751
4752 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4753 REG_WR(bp, BAR_TSTRORM_INTMEM +
4754 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4755
4756 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4757 REG_WR(bp, BAR_CSTRORM_INTMEM +
4758 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4759
4760 REG_WR(bp, BAR_XSTRORM_INTMEM +
4761 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4762 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4763 REG_WR(bp, BAR_XSTRORM_INTMEM +
4764 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4765 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4766
4767 REG_WR(bp, BAR_TSTRORM_INTMEM +
4768 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4769 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4770 REG_WR(bp, BAR_TSTRORM_INTMEM +
4771 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4772 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4773}
4774
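/* Ask the statistics state machine to stop and poll (10 x 100 ms) until
 * it reaches STATS_STATE_DISABLE, warning if it does not get there.
 */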
4775static void bnx2x_stop_stats(struct bnx2x *bp)
4776{
4777 might_sleep();
4778 if (bp->stats_state != STATS_STATE_DISABLE) {
4779 int timeout = 10;
4780
4781 bp->stats_state = STATS_STATE_STOP;
4782 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4783
4784 while (bp->stats_state != STATS_STATE_DISABLE) {
4785 if (!timeout) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004786 BNX2X_ERR("timeout waiting for stats stop\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004787 break;
4788 }
4789 timeout--;
4790 msleep(100);
4791 }
4792 }
4793 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4794}
4795
4796/*
4797 * Statistics service functions
4798 */
4799
4800static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4801{
4802 struct regp diff;
4803 struct regp sum;
4804 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4805 struct bmac_stats *old = &bp->old_bmac;
4806 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4807
4808 sum.hi = 0;
4809 sum.lo = 0;
4810
4811 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4812 tx_gtbyt.lo, total_bytes_transmitted_lo);
4813
4814 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4815 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4816 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4817
4818 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4819 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4820 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4821
4822 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4823 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4824 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4825 estats->total_unicast_packets_transmitted_lo, sum.lo);
4826
4827 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4828 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4829 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4830 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4831 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4832 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4833 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4834 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4835 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4836 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4837 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4838
4839 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4840 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4841 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4842 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4843 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4844 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4845 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4846 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4847
4848 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4849 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4850 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4851 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4852 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4853 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4854 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4855}
4856
4857static void bnx2x_update_emac_stats(struct bnx2x *bp)
4858{
4859 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4860 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4861
4862 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4863 total_bytes_transmitted_lo);
4864 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4865 total_unicast_packets_transmitted_hi,
4866 total_unicast_packets_transmitted_lo);
4867 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4868 total_multicast_packets_transmitted_hi,
4869 total_multicast_packets_transmitted_lo);
4870 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4871 total_broadcast_packets_transmitted_hi,
4872 total_broadcast_packets_transmitted_lo);
4873
4874 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4875 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4876 estats->single_collision_transmit_frames +=
4877 new->tx_dot3statssinglecollisionframes;
4878 estats->multiple_collision_transmit_frames +=
4879 new->tx_dot3statsmultiplecollisionframes;
4880 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4881 estats->excessive_collision_frames +=
4882 new->tx_dot3statsexcessivecollisions;
4883 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4884 estats->frames_transmitted_65_127_bytes +=
4885 new->tx_etherstatspkts65octetsto127octets;
4886 estats->frames_transmitted_128_255_bytes +=
4887 new->tx_etherstatspkts128octetsto255octets;
4888 estats->frames_transmitted_256_511_bytes +=
4889 new->tx_etherstatspkts256octetsto511octets;
4890 estats->frames_transmitted_512_1023_bytes +=
4891 new->tx_etherstatspkts512octetsto1023octets;
4892 estats->frames_transmitted_1024_1522_bytes +=
4893 new->tx_etherstatspkts1024octetsto1522octet;
4894 estats->frames_transmitted_1523_9022_bytes +=
4895 new->tx_etherstatspktsover1522octets;
4896
4897 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4898 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4899 estats->false_carrier_detections += new->rx_falsecarriererrors;
4900 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4901 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4902 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4903 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4904 estats->control_frames_received += new->rx_maccontrolframesreceived;
4905 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4906 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4907
4908 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4909 stat_IfHCInBadOctets_lo);
4910 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4911 stat_IfHCOutBadOctets_lo);
4912 estats->stat_Dot3statsInternalMacTransmitErrors +=
4913 new->tx_dot3statsinternalmactransmiterrors;
4914 estats->stat_Dot3StatsCarrierSenseErrors +=
4915 new->rx_dot3statscarriersenseerrors;
4916 estats->stat_Dot3StatsDeferredTransmissions +=
4917 new->tx_dot3statsdeferredtransmissions;
4918 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4919 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4920}
4921
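/* Fold the firmware (storm) and NIG statistics into eth_stats.  The
 * 'done' markers written by the DMAE engine and by the T/X storms are
 * checked first; a non-zero return means the counters were not refreshed
 * yet and the caller skips this round.
 */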
4922static int bnx2x_update_storm_stats(struct bnx2x *bp)
4923{
4924 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4925 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4926 struct tstorm_per_client_stats *tclient =
4927 &tstats->client_statistics[0];
4928 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4929 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4930 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4931 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4932 u32 diff;
4933
4934 /* are DMAE stats valid? */
4935 if (nstats->done != 0xffffffff) {
4936 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4937 return -1;
4938 }
4939
4940 /* are storm stats valid? */
4941 if (tstats->done.hi != 0xffffffff) {
4942 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4943 return -2;
4944 }
4945 if (xstats->done.hi != 0xffffffff) {
4946 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4947 return -3;
4948 }
4949
4950 estats->total_bytes_received_hi =
4951 estats->valid_bytes_received_hi =
4952 le32_to_cpu(tclient->total_rcv_bytes.hi);
4953 estats->total_bytes_received_lo =
4954 estats->valid_bytes_received_lo =
4955 le32_to_cpu(tclient->total_rcv_bytes.lo);
4956 ADD_64(estats->total_bytes_received_hi,
4957 le32_to_cpu(tclient->rcv_error_bytes.hi),
4958 estats->total_bytes_received_lo,
4959 le32_to_cpu(tclient->rcv_error_bytes.lo));
4960
4961 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4962 total_unicast_packets_received_hi,
4963 total_unicast_packets_received_lo);
4964 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4965 total_multicast_packets_received_hi,
4966 total_multicast_packets_received_lo);
4967 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4968 total_broadcast_packets_received_hi,
4969 total_broadcast_packets_received_lo);
4970
4971 estats->frames_received_64_bytes = MAC_STX_NA;
4972 estats->frames_received_65_127_bytes = MAC_STX_NA;
4973 estats->frames_received_128_255_bytes = MAC_STX_NA;
4974 estats->frames_received_256_511_bytes = MAC_STX_NA;
4975 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4976 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4977 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4978
4979 estats->x_total_sent_bytes_hi =
4980 le32_to_cpu(xstats->total_sent_bytes.hi);
4981 estats->x_total_sent_bytes_lo =
4982 le32_to_cpu(xstats->total_sent_bytes.lo);
4983 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4984
4985 estats->t_rcv_unicast_bytes_hi =
4986 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4987 estats->t_rcv_unicast_bytes_lo =
4988 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4989 estats->t_rcv_broadcast_bytes_hi =
4990 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4991 estats->t_rcv_broadcast_bytes_lo =
4992 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4993 estats->t_rcv_multicast_bytes_hi =
4994 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4995 estats->t_rcv_multicast_bytes_lo =
4996 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4997 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4998
4999 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
5000 estats->packets_too_big_discard =
5001 le32_to_cpu(tclient->packets_too_big_discard);
5002 estats->jabber_packets_received = estats->packets_too_big_discard +
5003 estats->stat_Dot3statsFramesTooLong;
5004 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
5005 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
5006 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
5007 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
5008 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
5009 estats->brb_truncate_discard =
5010 le32_to_cpu(tstats->brb_truncate_discard);
5011
5012 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
5013 bp->old_brb_discard = nstats->brb_discard;
5014
5015 estats->brb_packet = nstats->brb_packet;
5016 estats->brb_truncate = nstats->brb_truncate;
5017 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
5018 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
5019 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
5020 estats->mng_discard = nstats->mng_discard;
5021 estats->mng_octet_inp = nstats->mng_octet_inp;
5022 estats->mng_octet_out = nstats->mng_octet_out;
5023 estats->mng_packet_inp = nstats->mng_packet_inp;
5024 estats->mng_packet_out = nstats->mng_packet_out;
5025 estats->pbf_octets = nstats->pbf_octets;
5026 estats->pbf_packet = nstats->pbf_packet;
5027 estats->safc_inp = nstats->safc_inp;
5028
5029 xstats->done.hi = 0;
5030 tstats->done.hi = 0;
5031 nstats->done = 0;
5032
5033 return 0;
5034}
5035
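/* Translate the accumulated eth_stats into the standard
 * struct net_device_stats counters reported to the network stack.
 */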
5036static void bnx2x_update_net_stats(struct bnx2x *bp)
5037{
5038 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5039 struct net_device_stats *nstats = &bp->dev->stats;
5040
5041 nstats->rx_packets =
5042 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
5043 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
5044 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
5045
5046 nstats->tx_packets =
5047 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
5048 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
5049 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
5050
5051 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
5052
Eliezer Tamir0e39e642008-02-28 11:54:03 -08005053 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005054
Eliezer Tamir0e39e642008-02-28 11:54:03 -08005055 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056 nstats->tx_dropped = 0;
5057
5058 nstats->multicast =
5059 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
5060
Eliezer Tamir0e39e642008-02-28 11:54:03 -08005061 nstats->collisions = estats->single_collision_transmit_frames +
5062 estats->multiple_collision_transmit_frames +
5063 estats->late_collision_frames +
5064 estats->excessive_collision_frames;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005065
5066 nstats->rx_length_errors = estats->runt_packets_received +
5067 estats->jabber_packets_received;
5068	nstats->rx_over_errors = estats->brb_discard +
5069				 estats->brb_truncate_discard;
5070	nstats->rx_crc_errors = estats->crc_receive_errors;
5071	nstats->rx_frame_errors = estats->alignment_errors;
5072	nstats->rx_fifo_errors = estats->no_buff_discard;
5073	nstats->rx_missed_errors = estats->xxoverflow_discard;
5074
5075 nstats->rx_errors = nstats->rx_length_errors +
5076 nstats->rx_over_errors +
5077 nstats->rx_crc_errors +
5078 nstats->rx_frame_errors +
5079			    nstats->rx_fifo_errors +
5080			    nstats->rx_missed_errors;
5081
5082	nstats->tx_aborted_errors = estats->late_collision_frames +
5083				    estats->excessive_collision_frames;
5084	nstats->tx_carrier_errors = estats->false_carrier_detections;
5085 nstats->tx_fifo_errors = 0;
5086 nstats->tx_heartbeat_errors = 0;
5087 nstats->tx_window_errors = 0;
5088
5089 nstats->tx_errors = nstats->tx_aborted_errors +
5090 nstats->tx_carrier_errors;
5091
5092 estats->mac_stx_start = ++estats->mac_stx_end;
5093}
5094
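/* Refresh all statistics: read the storm counters, then the BMAC or
 * EMAC hardware counters (whichever MAC is active), fold the result
 * into the net_device stats, and re-arm the DMAE loader and the
 * STAT_QUERY ramrod for the next iteration.
 */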
5095static void bnx2x_update_stats(struct bnx2x *bp)
5096{
5097 int i;
5098
5099 if (!bnx2x_update_storm_stats(bp)) {
5100
5101 if (bp->phy_flags & PHY_BMAC_FLAG) {
5102 bnx2x_update_bmac_stats(bp);
5103
5104 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5105 bnx2x_update_emac_stats(bp);
5106
5107 } else { /* unreached */
5108 BNX2X_ERR("no MAC active\n");
5109 return;
5110 }
5111
5112 bnx2x_update_net_stats(bp);
5113 }
5114
5115 if (bp->msglevel & NETIF_MSG_TIMER) {
5116 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5117 struct net_device_stats *nstats = &bp->dev->stats;
5118
5119 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5120 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5121 " tx pkt (%lx)\n",
5122 bnx2x_tx_avail(bp->fp),
5123 *bp->fp->tx_cons_sb, nstats->tx_packets);
5124 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5125 " rx pkt (%lx)\n",
5126 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5127 *bp->fp->rx_cons_sb, nstats->rx_packets);
5128 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5129		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5130 estats->driver_xoff, estats->brb_discard);
5131 printk(KERN_DEBUG "tstats: checksum_discard %u "
5132 "packets_too_big_discard %u no_buff_discard %u "
5133 "mac_discard %u mac_filter_discard %u "
5134		       "xxoverflow_discard %u brb_truncate_discard %u "
5135 "ttl0_discard %u\n",
5136 estats->checksum_discard,
5137 estats->packets_too_big_discard,
5138 estats->no_buff_discard, estats->mac_discard,
5139 estats->mac_filter_discard, estats->xxoverflow_discard,
5140 estats->brb_truncate_discard, estats->ttl0_discard);
5141
5142 for_each_queue(bp, i) {
5143 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5144 bnx2x_fp(bp, i, tx_pkt),
5145 bnx2x_fp(bp, i, rx_pkt),
5146 bnx2x_fp(bp, i, rx_calls));
5147 }
5148 }
5149
5150 if (bp->state != BNX2X_STATE_OPEN) {
5151 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5152 return;
5153 }
5154
5155#ifdef BNX2X_STOP_ON_ERROR
5156 if (unlikely(bp->panic))
5157 return;
5158#endif
5159
5160 /* loader */
5161 if (bp->executer_idx) {
5162 struct dmae_command *dmae = &bp->dmae;
5163 int port = bp->port;
5164 int loader_idx = port * 8;
5165
5166 memset(dmae, 0, sizeof(struct dmae_command));
5167
5168 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5169 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5170 DMAE_CMD_DST_RESET |
5171#ifdef __BIG_ENDIAN
5172 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5173#else
5174 DMAE_CMD_ENDIANITY_DW_SWAP |
5175#endif
5176 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5177 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5178 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5179 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5180 sizeof(struct dmae_command) *
5181 (loader_idx + 1)) >> 2;
5182 dmae->dst_addr_hi = 0;
5183 dmae->len = sizeof(struct dmae_command) >> 2;
5184 dmae->len--; /* !!! for A0/1 only */
5185 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5186 dmae->comp_addr_hi = 0;
5187 dmae->comp_val = 1;
5188
5189 bnx2x_post_dmae(bp, dmae, loader_idx);
5190 }
5191
5192 if (bp->stats_state != STATS_STATE_ENABLE) {
5193 bp->stats_state = STATS_STATE_DISABLE;
5194 return;
5195 }
5196
5197 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5198		/* stats ramrod has its own slot on the spe */
5199 bp->spq_left++;
5200 bp->stat_pending = 1;
5201 }
5202}
5203
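/* Periodic driver timer: in poll mode it services the first fastpath
 * ring, then (unless nomcp) exchanges the driver/MCP heartbeat pulse,
 * and finally kicks a statistics update before re-arming itself.
 */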
5204static void bnx2x_timer(unsigned long data)
5205{
5206 struct bnx2x *bp = (struct bnx2x *) data;
5207
5208 if (!netif_running(bp->dev))
5209 return;
5210
5211 if (atomic_read(&bp->intr_sem) != 0)
5212		goto timer_restart;
5213
5214 if (poll) {
5215 struct bnx2x_fastpath *fp = &bp->fp[0];
5216 int rc;
5217
5218 bnx2x_tx_int(fp, 1000);
5219 rc = bnx2x_rx_int(fp, 1000);
5220 }
5221
5222	if (!nomcp) {
5223		int port = bp->port;
5224 u32 drv_pulse;
5225 u32 mcp_pulse;
5226
5227 ++bp->fw_drv_pulse_wr_seq;
5228 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5229 /* TBD - add SYSTEM_TIME */
5230 drv_pulse = bp->fw_drv_pulse_wr_seq;
5231		SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5232
5233		mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5234			     MCP_PULSE_SEQ_MASK);
5235 /* The delta between driver pulse and mcp response
5236 * should be 1 (before mcp response) or 0 (after mcp response)
5237 */
5238 if ((drv_pulse != mcp_pulse) &&
5239 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5240 /* someone lost a heartbeat... */
5241 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5242 drv_pulse, mcp_pulse);
5243 }
5244 }
5245
5246 if (bp->stats_state == STATS_STATE_DISABLE)
5247		goto timer_restart;
5248
5249 bnx2x_update_stats(bp);
5250
5251timer_restart:
5252	mod_timer(&bp->timer, jiffies + bp->current_interval);
5253}
5254
5255/* end of Statistics */
5256
5257/* nic init */
5258
5259/*
5260 * nic init service functions
5261 */
5262
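/* Point the USTORM and CSTORM halves of a per-queue status block at
 * its DMA address, mark all host coalescing indices as disabled, and
 * ACK the block so the IGU starts serving it.
 */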
5263static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5264 dma_addr_t mapping, int id)
5265{
5266 int port = bp->port;
5267 u64 section;
5268 int index;
5269
5270 /* USTORM */
5271 section = ((u64)mapping) + offsetof(struct host_status_block,
5272 u_status_block);
5273 sb->u_status_block.status_block_id = id;
5274
5275 REG_WR(bp, BAR_USTRORM_INTMEM +
5276 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5277 REG_WR(bp, BAR_USTRORM_INTMEM +
5278 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5279 U64_HI(section));
5280
5281 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5282 REG_WR16(bp, BAR_USTRORM_INTMEM +
5283 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5284
5285 /* CSTORM */
5286 section = ((u64)mapping) + offsetof(struct host_status_block,
5287 c_status_block);
5288 sb->c_status_block.status_block_id = id;
5289
5290 REG_WR(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5292 REG_WR(bp, BAR_CSTRORM_INTMEM +
5293 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5294 U64_HI(section));
5295
5296 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5297 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5298 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5299
5300 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5301}
5302
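/* Set up the default status block: latch the attention group signals
 * and the AEU mask, register the USTORM/CSTORM/TSTORM/XSTORM sections
 * with their BTR values, reset the cached indices and ACK the block.
 */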
5303static void bnx2x_init_def_sb(struct bnx2x *bp,
5304 struct host_def_status_block *def_sb,
5305 dma_addr_t mapping, int id)
5306{
5307 int port = bp->port;
5308 int index, val, reg_offset;
5309 u64 section;
5310
5311 /* ATTN */
5312 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5313 atten_status_block);
5314 def_sb->atten_status_block.status_block_id = id;
5315
5316	bp->def_att_idx = 0;
5317 bp->attn_state = 0;
5318
5319	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5320 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5321
5322 for (index = 0; index < 3; index++) {
5323 bp->attn_group[index].sig[0] = REG_RD(bp,
5324 reg_offset + 0x10*index);
5325 bp->attn_group[index].sig[1] = REG_RD(bp,
5326 reg_offset + 0x4 + 0x10*index);
5327 bp->attn_group[index].sig[2] = REG_RD(bp,
5328 reg_offset + 0x8 + 0x10*index);
5329 bp->attn_group[index].sig[3] = REG_RD(bp,
5330 reg_offset + 0xc + 0x10*index);
5331 }
5332
5333 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5334 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5335
5336 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5337 HC_REG_ATTN_MSG0_ADDR_L);
5338
5339 REG_WR(bp, reg_offset, U64_LO(section));
5340 REG_WR(bp, reg_offset + 4, U64_HI(section));
5341
5342 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5343
5344 val = REG_RD(bp, reg_offset);
5345 val |= id;
5346 REG_WR(bp, reg_offset, val);
5347
5348 /* USTORM */
5349 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5350 u_def_status_block);
5351 def_sb->u_def_status_block.status_block_id = id;
5352
5353	bp->def_u_idx = 0;
5354
5355	REG_WR(bp, BAR_USTRORM_INTMEM +
5356 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5357 REG_WR(bp, BAR_USTRORM_INTMEM +
5358 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5359 U64_HI(section));
5360 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5361 BNX2X_BTR);
5362
5363 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5364 REG_WR16(bp, BAR_USTRORM_INTMEM +
5365 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5366
5367 /* CSTORM */
5368 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5369 c_def_status_block);
5370 def_sb->c_def_status_block.status_block_id = id;
5371
5372	bp->def_c_idx = 0;
5373
5374	REG_WR(bp, BAR_CSTRORM_INTMEM +
5375 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5376 REG_WR(bp, BAR_CSTRORM_INTMEM +
5377 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5378 U64_HI(section));
5379 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5380 BNX2X_BTR);
5381
5382 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5383 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5384 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5385
5386 /* TSTORM */
5387 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5388 t_def_status_block);
5389 def_sb->t_def_status_block.status_block_id = id;
5390
5391	bp->def_t_idx = 0;
5392
5393	REG_WR(bp, BAR_TSTRORM_INTMEM +
5394 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5395 REG_WR(bp, BAR_TSTRORM_INTMEM +
5396 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5397 U64_HI(section));
5398 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5399 BNX2X_BTR);
5400
5401 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5402 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5403 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5404
5405 /* XSTORM */
5406 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5407 x_def_status_block);
5408 def_sb->x_def_status_block.status_block_id = id;
5409
5410	bp->def_x_idx = 0;
5411
5412	REG_WR(bp, BAR_XSTRORM_INTMEM +
5413 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5414 REG_WR(bp, BAR_XSTRORM_INTMEM +
5415 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5416 U64_HI(section));
5417 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5418 BNX2X_BTR);
5419
5420 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5421 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5422 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5423
5424	bp->stat_pending = 0;
5425
5426	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5427}
5428
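/* Program the host coalescing timeout for every queue.  The tick
 * values are divided by 12, apparently to convert to the HC timer
 * units; a tick value of 0 leaves the corresponding index disabled.
 */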
5429static void bnx2x_update_coalesce(struct bnx2x *bp)
5430{
5431 int port = bp->port;
5432 int i;
5433
5434 for_each_queue(bp, i) {
5435
5436 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5437 REG_WR8(bp, BAR_USTRORM_INTMEM +
5438 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5439 HC_INDEX_U_ETH_RX_CQ_CONS),
5440 bp->rx_ticks_int/12);
5441 REG_WR16(bp, BAR_USTRORM_INTMEM +
5442 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5443 HC_INDEX_U_ETH_RX_CQ_CONS),
5444 bp->rx_ticks_int ? 0 : 1);
5445
5446 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5447 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5448 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5449 HC_INDEX_C_ETH_TX_CQ_CONS),
5450 bp->tx_ticks_int/12);
5451 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5452 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5453 HC_INDEX_C_ETH_TX_CQ_CONS),
5454 bp->tx_ticks_int ? 0 : 1);
5455 }
5456}
5457
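/* Build the RX BD and completion rings: derive the buffer size from
 * the MTU, chain the last BD of each page to the next page, fill the
 * rings with newly allocated skbs and publish the RCQ producer to the
 * TSTORM.
 */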
5458static void bnx2x_init_rx_rings(struct bnx2x *bp)
5459{
5460 u16 ring_prod;
5461 int i, j;
5462 int port = bp->port;
5463
5464 bp->rx_buf_use_size = bp->dev->mtu;
5465
5466 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5467 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5468
5469 for_each_queue(bp, j) {
5470 struct bnx2x_fastpath *fp = &bp->fp[j];
5471
5472 fp->rx_bd_cons = 0;
5473 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5474
5475 for (i = 1; i <= NUM_RX_RINGS; i++) {
5476 struct eth_rx_bd *rx_bd;
5477
5478 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5479 rx_bd->addr_hi =
5480 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5481 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5482 rx_bd->addr_lo =
5483 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5484 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5485
5486 }
5487
5488 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5489 struct eth_rx_cqe_next_page *nextpg;
5490
5491 nextpg = (struct eth_rx_cqe_next_page *)
5492 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5493 nextpg->addr_hi =
5494 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5495 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5496 nextpg->addr_lo =
5497 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5498 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5499 }
5500
5501 /* rx completion queue */
5502 fp->rx_comp_cons = ring_prod = 0;
5503
5504 for (i = 0; i < bp->rx_ring_size; i++) {
5505 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5506 BNX2X_ERR("was only able to allocate "
5507 "%d rx skbs\n", i);
5508 break;
5509 }
5510 ring_prod = NEXT_RX_IDX(ring_prod);
5511 BUG_TRAP(ring_prod > i);
5512 }
5513
5514 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5515 fp->rx_pkt = fp->rx_calls = 0;
5516
5517		/* Warning! This will generate an interrupt (to the TSTORM) */
5518		/* must only be done when the chip is initialized */
5519 REG_WR(bp, BAR_TSTRORM_INTMEM +
5520 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5521 if (j != 0)
5522 continue;
5523
5524 REG_WR(bp, BAR_USTRORM_INTMEM +
5525 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5526 U64_LO(fp->rx_comp_mapping));
5527 REG_WR(bp, BAR_USTRORM_INTMEM +
5528 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5529 U64_HI(fp->rx_comp_mapping));
5530 }
5531}
5532
5533static void bnx2x_init_tx_ring(struct bnx2x *bp)
5534{
5535 int i, j;
5536
5537 for_each_queue(bp, j) {
5538 struct bnx2x_fastpath *fp = &bp->fp[j];
5539
5540 for (i = 1; i <= NUM_TX_RINGS; i++) {
5541 struct eth_tx_bd *tx_bd =
5542 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5543
5544 tx_bd->addr_hi =
5545 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5546 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5547 tx_bd->addr_lo =
5548 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5549 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5550 }
5551
5552 fp->tx_pkt_prod = 0;
5553 fp->tx_pkt_cons = 0;
5554 fp->tx_bd_prod = 0;
5555 fp->tx_bd_cons = 0;
5556 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5557 fp->tx_pkt = 0;
5558 }
5559}
5560
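/* Set up the slowpath queue (SPQ): reset the producer and tell the
 * XSTORM the page base address and the initial producer index.
 */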
5561static void bnx2x_init_sp_ring(struct bnx2x *bp)
5562{
5563 int port = bp->port;
5564
5565 spin_lock_init(&bp->spq_lock);
5566
5567 bp->spq_left = MAX_SPQ_PENDING;
5568 bp->spq_prod_idx = 0;
5569	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5570 bp->spq_prod_bd = bp->spq;
5571 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5572
5573 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5574 U64_LO(bp->spq_mapping));
5575 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5576 U64_HI(bp->spq_mapping));
5577
5578 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5579 bp->spq_prod_idx);
5580}
5581
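/* Fill the per-connection ETH context: TX BD page and doorbell data
 * addresses for the XSTORM, RX BD/RCQ page addresses and status block
 * indices for the USTORM/CSTORM, plus the CDU validation words.
 */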
5582static void bnx2x_init_context(struct bnx2x *bp)
5583{
5584 int i;
5585
5586 for_each_queue(bp, i) {
5587 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5588 struct bnx2x_fastpath *fp = &bp->fp[i];
5589
5590 context->xstorm_st_context.tx_bd_page_base_hi =
5591 U64_HI(fp->tx_desc_mapping);
5592 context->xstorm_st_context.tx_bd_page_base_lo =
5593 U64_LO(fp->tx_desc_mapping);
5594 context->xstorm_st_context.db_data_addr_hi =
5595 U64_HI(fp->tx_prods_mapping);
5596 context->xstorm_st_context.db_data_addr_lo =
5597 U64_LO(fp->tx_prods_mapping);
5598
5599 context->ustorm_st_context.rx_bd_page_base_hi =
5600 U64_HI(fp->rx_desc_mapping);
5601 context->ustorm_st_context.rx_bd_page_base_lo =
5602 U64_LO(fp->rx_desc_mapping);
5603 context->ustorm_st_context.status_block_id = i;
5604 context->ustorm_st_context.sb_index_number =
5605 HC_INDEX_U_ETH_RX_CQ_CONS;
5606 context->ustorm_st_context.rcq_base_address_hi =
5607 U64_HI(fp->rx_comp_mapping);
5608 context->ustorm_st_context.rcq_base_address_lo =
5609 U64_LO(fp->rx_comp_mapping);
5610 context->ustorm_st_context.flags =
5611 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5612 context->ustorm_st_context.mc_alignment_size = 64;
5613 context->ustorm_st_context.num_rss = bp->num_queues;
5614
5615 context->cstorm_st_context.sb_index_number =
5616 HC_INDEX_C_ETH_TX_CQ_CONS;
5617 context->cstorm_st_context.status_block_id = i;
5618
5619 context->xstorm_ag_context.cdu_reserved =
5620 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5621 CDU_REGION_NUMBER_XCM_AG,
5622 ETH_CONNECTION_TYPE);
5623 context->ustorm_ag_context.cdu_usage =
5624 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5625 CDU_REGION_NUMBER_UCM_AG,
5626 ETH_CONNECTION_TYPE);
5627 }
5628}
5629
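/* RSS indirection table: only programmed in multi-queue mode; entries
 * are simply assigned round-robin across the enabled queues.
 */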
5630static void bnx2x_init_ind_table(struct bnx2x *bp)
5631{
5632 int port = bp->port;
5633 int i;
5634
5635 if (!is_multi(bp))
5636 return;
5637
5638 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5639 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5640 i % bp->num_queues);
5641
5642 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5643}
5644
5645static void bnx2x_set_client_config(struct bnx2x *bp)
5646{
5647#ifdef BCM_VLAN
5648 int mode = bp->rx_mode;
5649#endif
5650 int i, port = bp->port;
5651 struct tstorm_eth_client_config tstorm_client = {0};
5652
5653 tstorm_client.mtu = bp->dev->mtu;
5654 tstorm_client.statistics_counter_id = 0;
5655 tstorm_client.config_flags =
5656 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5657#ifdef BCM_VLAN
5658 if (mode && bp->vlgrp) {
5659 tstorm_client.config_flags |=
5660 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5661 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5662 }
5663#endif
5664 if (mode != BNX2X_RX_MODE_PROMISC)
5665 tstorm_client.drop_flags =
5666 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5667
5668 for_each_queue(bp, i) {
5669 REG_WR(bp, BAR_TSTRORM_INTMEM +
5670 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5671 ((u32 *)&tstorm_client)[0]);
5672 REG_WR(bp, BAR_TSTRORM_INTMEM +
5673 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5674 ((u32 *)&tstorm_client)[1]);
5675 }
5676
5677/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5678 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5679}
5680
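/* Translate the driver rx_mode into TSTORM MAC filter flags (drop-all
 * or accept-all per unicast/multicast/broadcast class) and write them
 * to the chip; the per-client config is refreshed unless RX is off.
 */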
5681static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5682{
5683 int mode = bp->rx_mode;
5684 int port = bp->port;
5685 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5686 int i;
5687
5688 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5689
5690 switch (mode) {
5691 case BNX2X_RX_MODE_NONE: /* no Rx */
5692 tstorm_mac_filter.ucast_drop_all = 1;
5693 tstorm_mac_filter.mcast_drop_all = 1;
5694 tstorm_mac_filter.bcast_drop_all = 1;
5695 break;
5696 case BNX2X_RX_MODE_NORMAL:
5697 tstorm_mac_filter.bcast_accept_all = 1;
5698 break;
5699 case BNX2X_RX_MODE_ALLMULTI:
5700 tstorm_mac_filter.mcast_accept_all = 1;
5701 tstorm_mac_filter.bcast_accept_all = 1;
5702 break;
5703 case BNX2X_RX_MODE_PROMISC:
5704 tstorm_mac_filter.ucast_accept_all = 1;
5705 tstorm_mac_filter.mcast_accept_all = 1;
5706 tstorm_mac_filter.bcast_accept_all = 1;
5707 break;
5708 default:
5709 BNX2X_ERR("bad rx mode (%d)\n", mode);
5710 }
5711
5712 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5713 REG_WR(bp, BAR_TSTRORM_INTMEM +
5714 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5715 ((u32 *)&tstorm_mac_filter)[i]);
5716
5717/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5718 ((u32 *)&tstorm_mac_filter)[i]); */
5719 }
5720
5721	if (mode != BNX2X_RX_MODE_NONE)
5722		bnx2x_set_client_config(bp);
5723}
5724
5725static void bnx2x_init_internal(struct bnx2x *bp)
5726{
5727 int port = bp->port;
5728 struct tstorm_eth_function_common_config tstorm_config = {0};
5729 struct stats_indication_flags stats_flags = {0};
5730
5731 if (is_multi(bp)) {
5732 tstorm_config.config_flags = MULTI_FLAGS;
5733 tstorm_config.rss_result_mask = MULTI_MASK;
5734 }
5735
5736 REG_WR(bp, BAR_TSTRORM_INTMEM +
5737 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5738 (*(u32 *)&tstorm_config));
5739
5740/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5741 (*(u32 *)&tstorm_config)); */
5742
5743	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5744	bnx2x_set_storm_rx_mode(bp);
5745
5746	stats_flags.collect_eth = cpu_to_le32(1);
5747
5748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5749 ((u32 *)&stats_flags)[0]);
5750 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5751 ((u32 *)&stats_flags)[1]);
5752
5753 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5754 ((u32 *)&stats_flags)[0]);
5755 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5756 ((u32 *)&stats_flags)[1]);
5757
5758 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5759 ((u32 *)&stats_flags)[0]);
5760 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5761 ((u32 *)&stats_flags)[1]);
5762
5763/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5764 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5765}
5766
5767static void bnx2x_nic_init(struct bnx2x *bp)
5768{
5769 int i;
5770
5771 for_each_queue(bp, i) {
5772 struct bnx2x_fastpath *fp = &bp->fp[i];
5773
5774 fp->state = BNX2X_FP_STATE_CLOSED;
5775 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5776 bp, fp->status_blk, i);
5777 fp->index = i;
5778 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5779 }
5780
5781 bnx2x_init_def_sb(bp, bp->def_status_blk,
5782 bp->def_status_blk_mapping, 0x10);
5783 bnx2x_update_coalesce(bp);
5784 bnx2x_init_rx_rings(bp);
5785 bnx2x_init_tx_ring(bp);
5786 bnx2x_init_sp_ring(bp);
5787 bnx2x_init_context(bp);
5788 bnx2x_init_internal(bp);
5789 bnx2x_init_stats(bp);
5790 bnx2x_init_ind_table(bp);
5791	bnx2x_int_enable(bp);
5792
5793}
5794
5795/* end of nic init */
5796
5797/*
5798 * gzip service functions
5799 */
5800
5801static int bnx2x_gunzip_init(struct bnx2x *bp)
5802{
5803 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5804 &bp->gunzip_mapping);
5805 if (bp->gunzip_buf == NULL)
5806 goto gunzip_nomem1;
5807
5808 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5809 if (bp->strm == NULL)
5810 goto gunzip_nomem2;
5811
5812 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5813 GFP_KERNEL);
5814 if (bp->strm->workspace == NULL)
5815 goto gunzip_nomem3;
5816
5817 return 0;
5818
5819gunzip_nomem3:
5820 kfree(bp->strm);
5821 bp->strm = NULL;
5822
5823gunzip_nomem2:
5824 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5825 bp->gunzip_mapping);
5826 bp->gunzip_buf = NULL;
5827
5828gunzip_nomem1:
5829 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5830 " uncompression\n", bp->dev->name);
5831 return -ENOMEM;
5832}
5833
5834static void bnx2x_gunzip_end(struct bnx2x *bp)
5835{
5836 kfree(bp->strm->workspace);
5837
5838 kfree(bp->strm);
5839 bp->strm = NULL;
5840
5841 if (bp->gunzip_buf) {
5842 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5843 bp->gunzip_mapping);
5844 bp->gunzip_buf = NULL;
5845 }
5846}
5847
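/* Inflate a gzip-wrapped firmware image into bp->gunzip_buf.  The
 * header check and the optional file-name skip below follow the gzip
 * format (RFC 1952); the raw deflate stream is then fed to
 * zlib_inflate().
 */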
5848static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5849{
5850 int n, rc;
5851
5852 /* check gzip header */
5853 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5854 return -EINVAL;
5855
5856 n = 10;
5857
5858#define FNAME 0x8
5859
5860 if (zbuf[3] & FNAME)
5861 while ((zbuf[n++] != 0) && (n < len));
5862
5863 bp->strm->next_in = zbuf + n;
5864 bp->strm->avail_in = len - n;
5865 bp->strm->next_out = bp->gunzip_buf;
5866 bp->strm->avail_out = FW_BUF_SIZE;
5867
5868 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5869 if (rc != Z_OK)
5870 return rc;
5871
5872 rc = zlib_inflate(bp->strm, Z_FINISH);
5873 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5874 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5875 bp->dev->name, bp->strm->msg);
5876
5877 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5878 if (bp->gunzip_outlen & 0x3)
5879 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5880 " gunzip_outlen (%d) not aligned\n",
5881 bp->dev->name, bp->gunzip_outlen);
5882 bp->gunzip_outlen >>= 2;
5883
5884 zlib_inflateEnd(bp->strm);
5885
5886 if (rc == Z_STREAM_END)
5887 return 0;
5888
5889 return rc;
5890}
5891
5892/* nic load/unload */
5893
5894/*
5895 * general service functions
5896 */
5897
5898/* send a NIG loopback debug packet */
5899static void bnx2x_lb_pckt(struct bnx2x *bp)
5900{
5901#ifdef USE_DMAE
5902 u32 wb_write[3];
5903#endif
5904
5905 /* Ethernet source and destination addresses */
5906#ifdef USE_DMAE
5907 wb_write[0] = 0x55555555;
5908 wb_write[1] = 0x55555555;
5909 wb_write[2] = 0x20; /* SOP */
5910 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5911#else
5912 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5913 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5914 /* SOP */
5915 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5916#endif
5917
5918 /* NON-IP protocol */
5919#ifdef USE_DMAE
5920 wb_write[0] = 0x09000000;
5921 wb_write[1] = 0x55555555;
5922 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5923 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5924#else
5925 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5926 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5927 /* EOP, eop_bvalid = 0 */
5928 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5929#endif
5930}
5931
5932/* some of the internal memories
5933 * are not directly readable from the driver
5934 * to test them we send debug packets
5935 */
5936static int bnx2x_int_mem_test(struct bnx2x *bp)
5937{
5938 int factor;
5939 int count, i;
5940 u32 val = 0;
5941
5942 switch (CHIP_REV(bp)) {
5943 case CHIP_REV_EMUL:
5944 factor = 200;
5945 break;
5946 case CHIP_REV_FPGA:
5947 factor = 120;
5948 break;
5949 default:
5950 factor = 1;
5951 break;
5952 }
5953
5954 DP(NETIF_MSG_HW, "start part1\n");
5955
5956 /* Disable inputs of parser neighbor blocks */
5957 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5958 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5959 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5960 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5961
5962 /* Write 0 to parser credits for CFC search request */
5963 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5964
5965 /* send Ethernet packet */
5966 bnx2x_lb_pckt(bp);
5967
5968	/* TODO: should the NIG statistics be reset here? */
5969 /* Wait until NIG register shows 1 packet of size 0x10 */
5970 count = 1000 * factor;
5971 while (count) {
5972#ifdef BNX2X_DMAE_RD
5973 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5974 val = *bnx2x_sp(bp, wb_data[0]);
5975#else
5976 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5977 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5978#endif
5979 if (val == 0x10)
5980 break;
5981
5982 msleep(10);
5983 count--;
5984 }
5985 if (val != 0x10) {
5986 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5987 return -1;
5988 }
5989
5990 /* Wait until PRS register shows 1 packet */
5991 count = 1000 * factor;
5992 while (count) {
5993 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5994
5995 if (val == 1)
5996 break;
5997
5998 msleep(10);
5999 count--;
6000 }
6001 if (val != 0x1) {
6002 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6003 return -2;
6004 }
6005
6006 /* Reset and init BRB, PRS */
6007 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
6008 msleep(50);
6009 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
6010 msleep(50);
6011 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6012 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6013
6014 DP(NETIF_MSG_HW, "part2\n");
6015
6016 /* Disable inputs of parser neighbor blocks */
6017 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6018 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6019 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6020 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
6021
6022 /* Write 0 to parser credits for CFC search request */
6023 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6024
6025 /* send 10 Ethernet packets */
6026 for (i = 0; i < 10; i++)
6027 bnx2x_lb_pckt(bp);
6028
6029 /* Wait until NIG register shows 10 + 1
6030 packets of size 11*0x10 = 0xb0 */
6031 count = 1000 * factor;
6032 while (count) {
6033#ifdef BNX2X_DMAE_RD
6034 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6035 val = *bnx2x_sp(bp, wb_data[0]);
6036#else
6037 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6038 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6039#endif
6040 if (val == 0xb0)
6041 break;
6042
6043 msleep(10);
6044 count--;
6045 }
6046 if (val != 0xb0) {
6047 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6048 return -3;
6049 }
6050
6051 /* Wait until PRS register shows 2 packets */
6052 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6053 if (val != 2)
6054 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6055
6056 /* Write 1 to parser credits for CFC search request */
6057 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6058
6059 /* Wait until PRS register shows 3 packets */
6060 msleep(10 * factor);
6061	/* check that the PRS register now shows 3 packets */
6062 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6063 if (val != 3)
6064 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6065
6066 /* clear NIG EOP FIFO */
6067 for (i = 0; i < 11; i++)
6068 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6069 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6070 if (val != 1) {
6071 BNX2X_ERR("clear of NIG failed\n");
6072 return -4;
6073 }
6074
6075 /* Reset and init BRB, PRS, NIG */
6076 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6077 msleep(50);
6078 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6079 msleep(50);
6080 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6081 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6082#ifndef BCM_ISCSI
6083 /* set NIC mode */
6084 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6085#endif
6086
6087 /* Enable inputs of parser neighbor blocks */
6088 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6089 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6090 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6091 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6092
6093 DP(NETIF_MSG_HW, "done\n");
6094
6095 return 0; /* OK */
6096}
6097
6098static void enable_blocks_attention(struct bnx2x *bp)
6099{
6100 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6101 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6102 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6103 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6104 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6105 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6106 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6107 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6108 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6109/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6110/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6111 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6112 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6113 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6114/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6115/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6116 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6117 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6118 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6119 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6120/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6121/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6122 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6123 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6124 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6125 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6126/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6127/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6128 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6129 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6130/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6131 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6132}
6133
6134static int bnx2x_function_init(struct bnx2x *bp, int mode)
6135{
6136 int func = bp->port;
6137 int port = func ? PORT1 : PORT0;
6138 u32 val, i;
6139#ifdef USE_DMAE
6140 u32 wb_write[2];
6141#endif
6142
6143 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6144 if ((func != 0) && (func != 1)) {
6145 BNX2X_ERR("BAD function number (%d)\n", func);
6146 return -ENODEV;
6147 }
6148
6149 bnx2x_gunzip_init(bp);
6150
6151 if (mode & 0x1) { /* init common */
6152 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6153 func, mode);
6154		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6155 0xffffffff);
6156		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
6157 0xfffc);
6158		bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6159
6160 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6161 msleep(30);
6162 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6163
6164 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6165 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6166
6167 bnx2x_init_pxp(bp);
6168
6169 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6170 /* enable HW interrupt from PXP on USDM
6171 overflow bit 16 on INT_MASK_0 */
6172 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6173 }
6174
6175#ifdef __BIG_ENDIAN
6176 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6177 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6178 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6179 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6180 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6181 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6182
6183/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6184 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6185 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6186 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6187 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6188#endif
6189
6190#ifndef BCM_ISCSI
6191 /* set NIC mode */
6192 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6193#endif
6194
6195 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6196#ifdef BCM_ISCSI
6197 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6198 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6199 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6200#endif
6201
6202 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6203
6204		/* let the HW do its magic ... */
6205 msleep(100);
6206 /* finish PXP init
6207 (can be moved up if we want to use the DMAE) */
6208 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6209 if (val != 1) {
6210 BNX2X_ERR("PXP2 CFG failed\n");
6211 return -EBUSY;
6212 }
6213
6214 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6215 if (val != 1) {
6216 BNX2X_ERR("PXP2 RD_INIT failed\n");
6217 return -EBUSY;
6218 }
6219
6220 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6221 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6222
6223 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6224
6225 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6226 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6227 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6228 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6229
6230#ifdef BNX2X_DMAE_RD
6231 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6232 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6233 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6234 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6235#else
6236 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6237 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6238 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6239 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6240 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6241 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6242 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6243 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6244 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6245 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6246 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6247 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6248#endif
6249 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6250		/* soft reset pulse */
6251		REG_WR(bp, QM_REG_SOFT_RESET, 1);
6252 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6253
6254#ifdef BCM_ISCSI
6255 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6256#endif
6257 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6258 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6259 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6260 /* enable hw interrupt from doorbell Q */
6261 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6262 }
6263
6264 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6265
6266 if (CHIP_REV_IS_SLOW(bp)) {
6267 /* fix for emulation and FPGA for no pause */
6268 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6269 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6270 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6271 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6272 }
6273
6274 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6275
6276 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6277 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6278 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6279 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6280
6281 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6282 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6283 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6284 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6285
6286 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6287 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6288 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6289 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6290
6291 /* sync semi rtc */
6292 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6293 0x80000000);
6294 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6295 0x80000000);
6296
6297 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6298 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6299 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6300
6301 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6302 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6303 REG_WR(bp, i, 0xc0cac01a);
6304			/* TODO: replace with something meaningful */
6305		}
6306 /* SRCH COMMON comes here */
6307 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6308
6309 if (sizeof(union cdu_context) != 1024) {
6310 /* we currently assume that a context is 1024 bytes */
6311 printk(KERN_ALERT PFX "please adjust the size of"
6312 " cdu_context(%ld)\n",
6313 (long)sizeof(union cdu_context));
6314 }
6315 val = (4 << 24) + (0 << 12) + 1024;
6316 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6317 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6318
6319 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6320 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6321
6322 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6323 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6324 MISC_AEU_COMMON_END);
6325 /* RXPCS COMMON comes here */
6326 /* EMAC0 COMMON comes here */
6327 /* EMAC1 COMMON comes here */
6328 /* DBU COMMON comes here */
6329 /* DBG COMMON comes here */
6330 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6331
6332 if (CHIP_REV_IS_SLOW(bp))
6333 msleep(200);
6334
6335 /* finish CFC init */
6336 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6337 if (val != 1) {
6338 BNX2X_ERR("CFC LL_INIT failed\n");
6339 return -EBUSY;
6340 }
6341
6342 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6343 if (val != 1) {
6344 BNX2X_ERR("CFC AC_INIT failed\n");
6345 return -EBUSY;
6346 }
6347
6348 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6349 if (val != 1) {
6350 BNX2X_ERR("CFC CAM_INIT failed\n");
6351 return -EBUSY;
6352 }
6353
6354 REG_WR(bp, CFC_REG_DEBUG0, 0);
6355
6356 /* read NIG statistic
6357 to see if this is our first up since powerup */
6358#ifdef BNX2X_DMAE_RD
6359 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6360 val = *bnx2x_sp(bp, wb_data[0]);
6361#else
6362 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6363 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6364#endif
6365 /* do internal memory self test */
6366 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6367 BNX2X_ERR("internal mem selftest failed\n");
6368 return -EBUSY;
6369 }
6370
6371 /* clear PXP2 attentions */
6372 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6373
6374 enable_blocks_attention(bp);
6375 /* enable_blocks_parity(bp); */
6376
6377		switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6379 /* Fan failure is indicated by SPIO 5 */
6380 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6381 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6382
6383 /* set to active low mode */
6384 val = REG_RD(bp, MISC_REG_SPIO_INT);
6385 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6386 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6387 REG_WR(bp, MISC_REG_SPIO_INT, val);
6388
6389 /* enable interrupt to signal the IGU */
6390 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6391 val |= (1 << MISC_REGISTERS_SPIO_5);
6392 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6393 break;
6394
6395 default:
6396 break;
6397 }
6398
6399	} /* end of common init */
6400
6401 /* per port init */
6402
6403	/* the phys address is shifted right 12 bits and a valid bit (1)
6404	   is added as bit 53;
6405	   since this is a wide register(TM)
6406	   we split it into two 32 bit writes
6407	 */
6408#define RQ_ONCHIP_AT_PORT_SIZE 384
6409#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6410#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6411#define PXP_ONE_ILT(x) ((x << 10) | x)
6412
6413 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6414
6415 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6416
6417 /* Port PXP comes here */
6418 /* Port PXP2 comes here */
6419
6420 /* Offset is
6421 * Port0 0
6422 * Port1 384 */
6423 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6424#ifdef USE_DMAE
6425 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6426 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6427 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6428#else
6429 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6430 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6431 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6432 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6433#endif
6434 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6435
6436#ifdef BCM_ISCSI
6437 /* Port0 1
6438 * Port1 385 */
6439 i++;
6440 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6441 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6442 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6443 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6444
6445 /* Port0 2
6446 * Port1 386 */
6447 i++;
6448 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6449 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6450 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6451 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6452
6453 /* Port0 3
6454 * Port1 387 */
6455 i++;
6456 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6457 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6458 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6459 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6460#endif
6461
6462 /* Port TCM comes here */
6463 /* Port UCM comes here */
6464 /* Port CCM comes here */
6465 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6466 func ? XCM_PORT1_END : XCM_PORT0_END);
6467
6468#ifdef USE_DMAE
6469 wb_write[0] = 0;
6470 wb_write[1] = 0;
6471#endif
6472 for (i = 0; i < 32; i++) {
6473 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6474#ifdef USE_DMAE
6475 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6476#else
6477 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6478 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6479#endif
6480 }
6481 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6482
6483 /* Port QM comes here */
6484
6485#ifdef BCM_ISCSI
6486 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6487 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6488
6489 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6490 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6491#endif
6492 /* Port DQ comes here */
6493 /* Port BRB1 comes here */
6494 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6495 func ? PRS_PORT1_END : PRS_PORT0_END);
6496 /* Port TSDM comes here */
6497 /* Port CSDM comes here */
6498 /* Port USDM comes here */
6499 /* Port XSDM comes here */
6500 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6501 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6502 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6503 func ? USEM_PORT1_END : USEM_PORT0_END);
6504 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6505 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6506 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6507 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6508 /* Port UPB comes here */
6509 /* Port XSDM comes here */
6510 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6511 func ? PBF_PORT1_END : PBF_PORT0_END);
6512
6513 /* configure PBF to work without PAUSE mtu 9000 */
6514 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6515
6516 /* update threshold */
6517 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6518 /* update init credit */
6519 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6520
6521 /* probe changes */
6522 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6523 msleep(5);
6524 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6525
6526#ifdef BCM_ISCSI
6527 /* tell the searcher where the T2 table is */
6528 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6529
6530 wb_write[0] = U64_LO(bp->t2_mapping);
6531 wb_write[1] = U64_HI(bp->t2_mapping);
6532 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6533 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6534 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6535 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6536
6537 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6538 /* Port SRCH comes here */
6539#endif
6540 /* Port CDU comes here */
6541 /* Port CFC comes here */
6542 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6543 func ? HC_PORT1_END : HC_PORT0_END);
6544 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6545 MISC_AEU_PORT0_START,
6546 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6547 /* Port PXPCS comes here */
6548 /* Port EMAC0 comes here */
6549 /* Port EMAC1 comes here */
6550 /* Port DBU comes here */
6551 /* Port DBG comes here */
6552 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6553 func ? NIG_PORT1_END : NIG_PORT0_END);
6554 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6555 /* Port MCP comes here */
6556 /* Port DMAE comes here */
6557
6558	switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6559 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6560 /* add SPIO 5 to group 0 */
6561 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6562 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6563 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6564 break;
6565
6566 default:
6567 break;
6568 }
6569
6570	bnx2x_link_reset(bp);
6571
6572	/* Reset PCIE errors for debug */
6573	REG_WR(bp, 0x2114, 0xffffffff);
6574 REG_WR(bp, 0x2120, 0xffffffff);
6575 REG_WR(bp, 0x2814, 0xffffffff);
6576
6577 /* !!! move to init_values.h */
6578 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6579 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6580 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6581 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6582
6583 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6584 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6585 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6586 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6587
6588 bnx2x_gunzip_end(bp);
6589
6590 if (!nomcp) {
6591 port = bp->port;
6592
6593 bp->fw_drv_pulse_wr_seq =
6594			(SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6595			 DRV_PULSE_SEQ_MASK);
6596		bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6597		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6598 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6599 } else {
6600 bp->fw_mb = 0;
6601 }
6602
6603 return 0;
6604}
6605
6606/* send the MCP a request, block until there is a reply */
6607static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6608{
6609	int port = bp->port;
6610	u32 seq = ++bp->fw_seq;
6611	u32 rc = 0;
6612
6613	SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6614	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6615
6616	/* let the FW do its magic ... */
6617 msleep(100); /* TBD */
6618
6619 if (CHIP_REV_IS_SLOW(bp))
6620 msleep(900);
6621
6622	rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6623	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6624
6625 /* is this a reply to our command? */
6626 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6627 rc &= FW_MSG_CODE_MASK;
6628
6629	} else {
6630 /* FW BUG! */
6631 BNX2X_ERR("FW failed to respond!\n");
6632 bnx2x_fw_dump(bp);
6633 rc = 0;
6634 }
6635
6636	return rc;
6637}
6638
6639static void bnx2x_free_mem(struct bnx2x *bp)
6640{
6641
6642#define BNX2X_PCI_FREE(x, y, size) \
6643 do { \
6644 if (x) { \
6645 pci_free_consistent(bp->pdev, size, x, y); \
6646 x = NULL; \
6647 y = 0; \
6648 } \
6649 } while (0)
6650
6651#define BNX2X_FREE(x) \
6652 do { \
6653 if (x) { \
6654 vfree(x); \
6655 x = NULL; \
6656 } \
6657 } while (0)
6658
6659 int i;
6660
6661 /* fastpath */
6662 for_each_queue(bp, i) {
6663
6664 /* Status blocks */
6665 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6666 bnx2x_fp(bp, i, status_blk_mapping),
6667 sizeof(struct host_status_block) +
6668 sizeof(struct eth_tx_db_data));
6669
6670 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6671 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6672 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6673 bnx2x_fp(bp, i, tx_desc_mapping),
6674 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6675
6676 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6677 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6678 bnx2x_fp(bp, i, rx_desc_mapping),
6679 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6680
6681 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6682 bnx2x_fp(bp, i, rx_comp_mapping),
6683 sizeof(struct eth_fast_path_rx_cqe) *
6684 NUM_RCQ_BD);
6685 }
6686
6687 BNX2X_FREE(bp->fp);
6688
6689 /* end of fastpath */
6690
6691 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6692 (sizeof(struct host_def_status_block)));
6693
6694 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6695 (sizeof(struct bnx2x_slowpath)));
6696
6697#ifdef BCM_ISCSI
6698 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6699 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6700 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6701 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6702#endif
6703 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6704
6705#undef BNX2X_PCI_FREE
6706#undef BNX2X_FREE
6707}
6708
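/* Allocate all driver memory: per-queue status blocks and TX/RX/RCQ
 * rings (coherent DMA for the hardware rings, vmalloc for the shadow
 * rings), the default status block, the slowpath buffer and the SPQ
 * page.  On any failure everything allocated so far is released.
 */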
6709static int bnx2x_alloc_mem(struct bnx2x *bp)
6710{
6711
6712#define BNX2X_PCI_ALLOC(x, y, size) \
6713 do { \
6714 x = pci_alloc_consistent(bp->pdev, size, y); \
6715 if (x == NULL) \
6716 goto alloc_mem_err; \
6717 memset(x, 0, size); \
6718 } while (0)
6719
6720#define BNX2X_ALLOC(x, size) \
6721 do { \
6722 x = vmalloc(size); \
6723 if (x == NULL) \
6724 goto alloc_mem_err; \
6725 memset(x, 0, size); \
6726 } while (0)
6727
6728 int i;
6729
6730 /* fastpath */
6731 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6732
6733 for_each_queue(bp, i) {
6734 bnx2x_fp(bp, i, bp) = bp;
6735
6736 /* Status blocks */
6737 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6738 &bnx2x_fp(bp, i, status_blk_mapping),
6739 sizeof(struct host_status_block) +
6740 sizeof(struct eth_tx_db_data));
6741
6742 bnx2x_fp(bp, i, hw_tx_prods) =
6743 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6744
6745 bnx2x_fp(bp, i, tx_prods_mapping) =
6746 bnx2x_fp(bp, i, status_blk_mapping) +
6747 sizeof(struct host_status_block);
6748
6749 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6750 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6751 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6752 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6753 &bnx2x_fp(bp, i, tx_desc_mapping),
6754 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6755
6756 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6757 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6758 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6759 &bnx2x_fp(bp, i, rx_desc_mapping),
6760 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6761
6762 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6763 &bnx2x_fp(bp, i, rx_comp_mapping),
6764 sizeof(struct eth_fast_path_rx_cqe) *
6765 NUM_RCQ_BD);
6766
6767 }
6768 /* end of fastpath */
6769
6770 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6771 sizeof(struct host_def_status_block));
6772
6773 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6774 sizeof(struct bnx2x_slowpath));
6775
6776#ifdef BCM_ISCSI
6777 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6778
6779 /* Initialize T1 */
6780 for (i = 0; i < 64*1024; i += 64) {
6781 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6782 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6783 }
6784
6785 /* allocate searcher T2 table
6786 we allocate 1/4 of alloc num for T2
6787 (which is not entered into the ILT) */
6788 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6789
6790 /* Initialize T2 */
6791 for (i = 0; i < 16*1024; i += 64)
6792 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6793
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006794 /* now fixup the last line in the block to point to the next block */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006795 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6796
6797 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6798 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6799
6800 /* QM queues (128*MAX_CONN) */
6801 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6802#endif
6803
6804 /* Slow path ring */
6805 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6806
6807 return 0;
6808
6809alloc_mem_err:
6810 bnx2x_free_mem(bp);
6811 return -ENOMEM;
6812
6813#undef BNX2X_PCI_ALLOC
6814#undef BNX2X_ALLOC
6815}
6816
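/* Release the skbs still posted on the rings: the Tx side walks each queue
 * from tx_pkt_cons to tx_pkt_prod and lets bnx2x_free_tx_pkt() unmap and
 * free the packets, while the Rx side below unmaps and frees every buffer
 * that is still attached to an rx_buf_ring entry.
 */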
6817static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6818{
6819 int i;
6820
6821 for_each_queue(bp, i) {
6822 struct bnx2x_fastpath *fp = &bp->fp[i];
6823
6824 u16 bd_cons = fp->tx_bd_cons;
6825 u16 sw_prod = fp->tx_pkt_prod;
6826 u16 sw_cons = fp->tx_pkt_cons;
6827
6828 BUG_TRAP(fp->tx_buf_ring != NULL);
6829
6830 while (sw_cons != sw_prod) {
6831 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6832 sw_cons++;
6833 }
6834 }
6835}
6836
6837static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6838{
6839 int i, j;
6840
6841 for_each_queue(bp, j) {
6842 struct bnx2x_fastpath *fp = &bp->fp[j];
6843
6844 BUG_TRAP(fp->rx_buf_ring != NULL);
6845
6846 for (i = 0; i < NUM_RX_BD; i++) {
6847 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6848 struct sk_buff *skb = rx_buf->skb;
6849
6850 if (skb == NULL)
6851 continue;
6852
6853 pci_unmap_single(bp->pdev,
6854 pci_unmap_addr(rx_buf, mapping),
6855 bp->rx_buf_use_size,
6856 PCI_DMA_FROMDEVICE);
6857
6858 rx_buf->skb = NULL;
6859 dev_kfree_skb(skb);
6860 }
6861 }
6862}
6863
6864static void bnx2x_free_skbs(struct bnx2x *bp)
6865{
6866 bnx2x_free_tx_skbs(bp);
6867 bnx2x_free_rx_skbs(bp);
6868}
6869
6870static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6871{
6872 int i;
6873
6874 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006875 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006876 bp->msix_table[0].vector);
6877
6878 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006879 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006880 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6881 bnx2x_fp(bp, i, state));
6882
Eliezer Tamir228241e2008-02-28 11:56:57 -08006883 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6884 BNX2X_ERR("IRQ of fp #%d being freed while "
6885 "state != closed\n", i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006886
Eliezer Tamir228241e2008-02-28 11:56:57 -08006887 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006888 }
6889
6890}
6891
6892static void bnx2x_free_irq(struct bnx2x *bp)
6893{
6894
6895 if (bp->flags & USING_MSIX_FLAG) {
6896
6897 bnx2x_free_msix_irqs(bp);
6898 pci_disable_msix(bp->pdev);
6899
6900 bp->flags &= ~USING_MSIX_FLAG;
6901
6902 } else
6903 free_irq(bp->pdev->irq, bp->dev);
6904}
6905
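/* MSI-X vector layout: entry 0 is used for the slowpath/default status
 * block interrupt (bnx2x_msix_sp_int) and entries 1..num_queues for the
 * fastpath queues (bnx2x_msix_fp_int), as requested in
 * bnx2x_req_msix_irqs() below.  pci_enable_msix() here is all-or-nothing;
 * on failure the caller falls back to a single queue with INT#A.
 */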
6906static int bnx2x_enable_msix(struct bnx2x *bp)
6907{
6908
6909 int i;
6910
6911 bp->msix_table[0].entry = 0;
6912 for_each_queue(bp, i)
6913 bp->msix_table[i + 1].entry = i + 1;
6914
6915 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6916 			     bp->num_queues + 1)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006917 BNX2X_LOG("failed to enable MSI-X\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006918 return -1;
6919
6920 }
6921
6922 bp->flags |= USING_MSIX_FLAG;
6923
6924 return 0;
6925
6926}
6927
6928
6929static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6930{
6931
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006932 int i, rc;
6933
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006934 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6935 bp->dev->name, bp->dev);
6936
6937 if (rc) {
6938 BNX2X_ERR("request sp irq failed\n");
6939 return -EBUSY;
6940 }
6941
6942 for_each_queue(bp, i) {
6943 rc = request_irq(bp->msix_table[i + 1].vector,
6944 bnx2x_msix_fp_int, 0,
6945 bp->dev->name, &bp->fp[i]);
6946
6947 if (rc) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006948 BNX2X_ERR("request fp #%d irq failed "
6949 "rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006950 bnx2x_free_msix_irqs(bp);
6951 return -EBUSY;
6952 }
6953
6954 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6955
6956 }
6957
6958 return 0;
6959
6960}
6961
6962static int bnx2x_req_irq(struct bnx2x *bp)
6963{
6964
6965 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6966 IRQF_SHARED, bp->dev->name, bp->dev);
6967 if (!rc)
6968 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6969
6970 return rc;
6971
6972}
6973
6974/*
6975 * Init service functions
6976 */
6977
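/* The CAM entry stores the MAC address as three 16-bit words.  As a worked
 * example (on a little-endian host), for dev_addr 00:11:22:33:44:55 the
 * swab16() conversions below yield
 *	msb_mac_addr    = 0x0011
 *	middle_mac_addr = 0x2233
 *	lsb_mac_addr    = 0x4455
 * i.e. the DP print reports "setting MAC (0011:2233:4455)".
 */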
6978static void bnx2x_set_mac_addr(struct bnx2x *bp)
6979{
6980 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6981
6982 /* CAM allocation
6983 * unicasts 0-31:port0 32-63:port1
6984 * multicast 64-127:port0 128-191:port1
6985 */
6986 config->hdr.length_6b = 2;
6987 config->hdr.offset = bp->port ? 31 : 0;
6988 config->hdr.reserved0 = 0;
6989 config->hdr.reserved1 = 0;
6990
6991 /* primary MAC */
6992 config->config_table[0].cam_entry.msb_mac_addr =
6993 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6994 config->config_table[0].cam_entry.middle_mac_addr =
6995 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6996 config->config_table[0].cam_entry.lsb_mac_addr =
6997 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6998 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6999 config->config_table[0].target_table_entry.flags = 0;
7000 config->config_table[0].target_table_entry.client_id = 0;
7001 config->config_table[0].target_table_entry.vlan_id = 0;
7002
7003 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
7004 config->config_table[0].cam_entry.msb_mac_addr,
7005 config->config_table[0].cam_entry.middle_mac_addr,
7006 config->config_table[0].cam_entry.lsb_mac_addr);
7007
7008 /* broadcast */
7009 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
7010 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
7011 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
7012 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
7013 config->config_table[1].target_table_entry.flags =
7014 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7015 config->config_table[1].target_table_entry.client_id = 0;
7016 config->config_table[1].target_table_entry.vlan_id = 0;
7017
7018 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7019 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7020 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7021}
7022
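/* Wait for a ramrod to complete: the completion path (bnx2x_sp_event())
 * updates *state_p, so this simply polls it for up to ~500 ms, sleeping
 * 1 ms per iteration.  When called with poll set (interrupts not yet
 * fully operational) it drives the Rx completion processing itself via
 * bnx2x_rx_int() on the default queue and, if idx != 0, on fp[idx] too.
 */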
7023static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7024 int *state_p, int poll)
7025{
7026 /* can take a while if any port is running */
7027 int timeout = 500;
7028
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007029 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7030 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007031
7032 might_sleep();
7033
7034 while (timeout) {
7035
7036 if (poll) {
7037 bnx2x_rx_int(bp->fp, 10);
7038 			/* If the index is different from 0,
7039 			 * the reply for some commands will
7040 			 * arrive on the non-default queue
7041 			 */
7042 if (idx)
7043 bnx2x_rx_int(&bp->fp[idx], 10);
7044 }
7045
7046 mb(); /* state is changed by bnx2x_sp_event()*/
7047
Eliezer Tamir49d66772008-02-28 11:53:13 -08007048 if (*state_p == state)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007049 return 0;
7050
7051 timeout--;
7052 msleep(1);
7053
7054 }
7055
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007056 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007057 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7058 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007059
Eliezer Tamir49d66772008-02-28 11:53:13 -08007060 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007061}
7062
7063static int bnx2x_setup_leading(struct bnx2x *bp)
7064{
7065
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007066 /* reset IGU state */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007067 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7068
7069 /* SETUP ramrod */
7070 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7071
7072 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7073
7074}
7075
7076static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7077{
7078
7079 /* reset IGU state */
7080 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7081
Eliezer Tamir228241e2008-02-28 11:56:57 -08007082 /* SETUP ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007083 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7084 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7085
7086 /* Wait for completion */
7087 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eliezer Tamir228241e2008-02-28 11:56:57 -08007088 &(bp->fp[index].state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007089
7090}
7091
7092
7093static int bnx2x_poll(struct napi_struct *napi, int budget);
7094static void bnx2x_set_rx_mode(struct net_device *dev);
7095
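/* Bring the NIC up.  The sequence is: LOAD_REQ handshake with the MCP
 * (or assume "load common" when nomcp is set), pick the number of queues
 * and try MSI-X, allocate host memory, request IRQs (only when called
 * from bnx2x_open, i.e. req_irq != 0), init the HW and the NIC internals,
 * report LOAD_DONE, then issue the leading/multi SETUP ramrods, program
 * the MAC, init the PHY and finally start the Tx queue and the timer.
 * The load_* error labels unwind these steps in reverse order.
 */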
7096static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7097{
Eliezer Tamir228241e2008-02-28 11:56:57 -08007098 u32 load_code;
7099 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007100
7101 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7102
7103 	/* Send LOAD_REQUEST command to the MCP.
7104 	   The reply indicates the type of LOAD command: if this is the
7105 	   first port to be initialized, the common blocks should be
7106 	   initialized as well; otherwise they should not.
7107 	*/
7108 if (!nomcp) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007109 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7110 if (!load_code) {
7111 BNX2X_ERR("MCP response failure, unloading\n");
7112 return -EBUSY;
7113 }
7114 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7115 BNX2X_ERR("MCP refused load request, unloading\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007116 return -EBUSY; /* other port in diagnostic mode */
7117 }
7118 } else {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007119 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007120 }
7121
7122 	/* if we cannot use MSI-X we only need one fastpath,
7123 	 * so try to enable MSI-X with the requested number of fastpaths
7124 	 * and fall back to INT#A with a single one
7125 	 */
7126 if (req_irq) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007127 if (use_inta) {
7128 bp->num_queues = 1;
7129 } else {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007130 if ((use_multi > 1) && (use_multi <= 16))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007131 /* user requested number */
7132 bp->num_queues = use_multi;
7133 else if (use_multi == 1)
7134 bp->num_queues = num_online_cpus();
7135 else
7136 bp->num_queues = 1;
7137
7138 if (bnx2x_enable_msix(bp)) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007139 /* failed to enable msix */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007140 bp->num_queues = 1;
7141 if (use_multi)
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007142 BNX2X_ERR("Multi requested but failed"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007143 " to enable MSI-X\n");
7144 }
7145 }
7146 }
7147
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007148 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7149
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007150 if (bnx2x_alloc_mem(bp))
7151 return -ENOMEM;
7152
7153 if (req_irq) {
7154 if (bp->flags & USING_MSIX_FLAG) {
7155 if (bnx2x_req_msix_irqs(bp)) {
7156 pci_disable_msix(bp->pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08007157 goto load_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007158 }
7159
7160 } else {
7161 if (bnx2x_req_irq(bp)) {
7162 BNX2X_ERR("IRQ request failed, aborting\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08007163 goto load_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007164 }
7165 }
7166 }
7167
7168 for_each_queue(bp, i)
7169 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7170 bnx2x_poll, 128);
7171
7172
7173 /* Initialize HW */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007174 if (bnx2x_function_init(bp,
7175 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007176 BNX2X_ERR("HW init failed, aborting\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08007177 goto load_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007178 }
7179
7180
7181 atomic_set(&bp->intr_sem, 0);
7182
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007183
7184 /* Setup NIC internals and enable interrupts */
7185 bnx2x_nic_init(bp);
7186
7187 /* Send LOAD_DONE command to MCP */
7188 if (!nomcp) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007189 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7190 if (!load_code) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007191 BNX2X_ERR("MCP response failure, unloading\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08007192 goto load_int_disable;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007193 }
7194 }
7195
7196 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7197
7198 /* Enable Rx interrupt handling before sending the ramrod
7199 as it's completed on Rx FP queue */
7200 for_each_queue(bp, i)
7201 napi_enable(&bnx2x_fp(bp, i, napi));
7202
7203 if (bnx2x_setup_leading(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007204 goto load_stop_netif;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007205
7206 for_each_nondefault_queue(bp, i)
7207 if (bnx2x_setup_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007208 goto load_stop_netif;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007209
7210 bnx2x_set_mac_addr(bp);
7211
7212 bnx2x_phy_init(bp);
7213
7214 /* Start fast path */
7215 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7216 netif_start_queue(bp->dev);
7217 if (bp->flags & USING_MSIX_FLAG)
7218 printk(KERN_INFO PFX "%s: using MSI-X\n",
7219 bp->dev->name);
7220
7221 /* Otherwise Tx queue should be only reenabled */
7222 } else if (netif_running(bp->dev)) {
7223 netif_wake_queue(bp->dev);
7224 bnx2x_set_rx_mode(bp->dev);
7225 }
7226
7227 /* start the timer */
7228 mod_timer(&bp->timer, jiffies + bp->current_interval);
7229
7230 return 0;
7231
Eliezer Tamir228241e2008-02-28 11:56:57 -08007232load_stop_netif:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007233 for_each_queue(bp, i)
7234 napi_disable(&bnx2x_fp(bp, i, napi));
7235
Eliezer Tamir228241e2008-02-28 11:56:57 -08007236load_int_disable:
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08007237 bnx2x_int_disable_sync(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007238
7239 bnx2x_free_skbs(bp);
7240 bnx2x_free_irq(bp);
7241
Eliezer Tamir228241e2008-02-28 11:56:57 -08007242load_error:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007243 bnx2x_free_mem(bp);
7244
7245 /* TBD we really need to reset the chip
7246 if we want to recover from this */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007247 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007248}
7249
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007250
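/* Quiesce and reset the HW for this port: stop BRB reception, mask the
 * AEU attentions, clear this port's part of the ILT and, when this is the
 * last function to unload (FW_MSG_CODE_DRV_UNLOAD_COMMON), pull the
 * common blocks into reset as well.
 */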
7251static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7252{
7253 int port = bp->port;
7254#ifdef USE_DMAE
7255 u32 wb_write[2];
7256#endif
7257 int base, i;
7258
7259 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7260
7261 /* Do not rcv packets to BRB */
7262 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7263 /* Do not direct rcv packets that are not for MCP to the BRB */
7264 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7265 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7266
7267 /* Configure IGU and AEU */
7268 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7269 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7270
7271 /* TODO: Close Doorbell port? */
7272
7273 /* Clear ILT */
7274#ifdef USE_DMAE
7275 wb_write[0] = 0;
7276 wb_write[1] = 0;
7277#endif
7278 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7279 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7280#ifdef USE_DMAE
7281 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7282#else
7283 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7284 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7285#endif
7286 }
7287
7288 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7289 /* reset_common */
7290 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7291 0xd3ffff7f);
7292 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7293 0x1403);
7294 }
7295}
7296
7297static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7298{
7299
7300 int rc;
7301
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007302 /* halt the connection */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007303 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7304 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7305
7306
7307 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7308 &(bp->fp[index].state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007309 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007310 return rc;
7311
7312 /* delete cfc entry */
7313 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7314
Eliezer Tamir49d66772008-02-28 11:53:13 -08007315 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007316 &(bp->fp[index].state), 1);
7317
7318}
7319
7320
7321static void bnx2x_stop_leading(struct bnx2x *bp)
7322{
Eliezer Tamir49d66772008-02-28 11:53:13 -08007323 u16 dsb_sp_prod_idx;
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007324 /* if the other port is handling traffic,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007325 this can take a lot of time */
7326 int timeout = 500;
7327
7328 might_sleep();
7329
7330 /* Send HALT ramrod */
7331 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7332 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7333
7334 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7335 &(bp->fp[0].state), 1))
7336 return;
7337
Eliezer Tamir49d66772008-02-28 11:53:13 -08007338 dsb_sp_prod_idx = *bp->dsb_sp_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007339
Eliezer Tamir228241e2008-02-28 11:56:57 -08007340 /* Send PORT_DELETE ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007341 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7342
7343 	/* Wait for the completion to arrive on the default status block;
7344 	   we are going to reset the chip anyway,
7345 	   so there is not much to do if this times out
7346 	 */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007347 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7348 timeout--;
7349 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350 }
Eliezer Tamir49d66772008-02-28 11:53:13 -08007351 if (!timeout) {
7352 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7353 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7354 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7355 }
7356 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7357 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007358}
7359
Eliezer Tamir49d66772008-02-28 11:53:13 -08007360
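/* Tear the NIC down (roughly the reverse of bnx2x_nic_load): stop the
 * timer and Rx mode, disable the Tx queue, wait for the fastpath and
 * slowpath work to drain, disable NAPI and interrupts, pick the unload
 * code according to the WoL configuration (programming the EMAC for
 * magic-packet wake-up if enabled), close the non-default and leading
 * connections with HALT/CFC_DEL/PORT_DEL ramrods, reset the link and the
 * chip, report UNLOAD_DONE to the MCP and free the remaining resources.
 */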
Eliezer Tamir228241e2008-02-28 11:56:57 -08007361static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007362{
7363 u32 reset_code = 0;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007364 int i, timeout;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007365
7366 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7367
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007368 del_timer_sync(&bp->timer);
7369
Eliezer Tamir228241e2008-02-28 11:56:57 -08007370 bp->rx_mode = BNX2X_RX_MODE_NONE;
7371 bnx2x_set_storm_rx_mode(bp);
7372
7373 if (netif_running(bp->dev)) {
7374 netif_tx_disable(bp->dev);
7375 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7376 }
7377
7378 /* Wait until all fast path tasks complete */
7379 for_each_queue(bp, i) {
7380 struct bnx2x_fastpath *fp = &bp->fp[i];
7381
7382 timeout = 1000;
7383 while (bnx2x_has_work(fp) && (timeout--))
7384 msleep(1);
7385 if (!timeout)
7386 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
7387 }
7388
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007389 /* Wait until stat ramrod returns and all SP tasks complete */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007390 timeout = 1000;
7391 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
7392 (timeout--))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007393 msleep(1);
7394
Eliezer Tamir228241e2008-02-28 11:56:57 -08007395 for_each_queue(bp, i)
7396 napi_disable(&bnx2x_fp(bp, i, napi));
7397 /* Disable interrupts after Tx and Rx are disabled on stack level */
7398 bnx2x_int_disable_sync(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007399
7400 if (bp->flags & NO_WOL_FLAG)
7401 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007403 else if (bp->wol) {
7404 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7405 u8 *mac_addr = bp->dev->dev_addr;
7406 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7407 EMAC_MODE_ACPI_RCVD);
7408
7409 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7410
7411 val = (mac_addr[0] << 8) | mac_addr[1];
7412 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7413
7414 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7415 (mac_addr[4] << 8) | mac_addr[5];
7416 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7417
7418 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007419
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007420 } else
7421 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7422
Eliezer Tamir228241e2008-02-28 11:56:57 -08007423 /* Close multi and leading connections */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007424 for_each_nondefault_queue(bp, i)
7425 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007426 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007427
7428 bnx2x_stop_leading(bp);
Eliezer Tamir228241e2008-02-28 11:56:57 -08007429 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
7430 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
7431 		DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
7432 		   "state 0x%x fp[0].state 0x%x\n",
7433 bp->state, bp->fp[0].state);
7434 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007435
Eliezer Tamir228241e2008-02-28 11:56:57 -08007436unload_error:
7437 bnx2x_link_reset(bp);
7438
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007439 if (!nomcp)
Eliezer Tamir228241e2008-02-28 11:56:57 -08007440 reset_code = bnx2x_fw_command(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007441 else
Eliezer Tamir228241e2008-02-28 11:56:57 -08007442 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007443
7444 /* Release IRQs */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007445 if (free_irq)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007446 bnx2x_free_irq(bp);
7447
7448 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007449 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007450
7451 /* Report UNLOAD_DONE to MCP */
7452 if (!nomcp)
7453 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7454
7455 /* Free SKBs and driver internals */
7456 bnx2x_free_skbs(bp);
7457 bnx2x_free_mem(bp);
7458
7459 bp->state = BNX2X_STATE_CLOSED;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007460
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007461 netif_carrier_off(bp->dev);
7462
7463 return 0;
7464}
7465
7466/* end of nic load/unload */
7467
7468/* ethtool_ops */
7469
7470/*
7471 * Init service functions
7472 */
7473
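/* Build bp->supported from the NVRAM configuration: the switch config
 * selects between the 1G SerDes and the 10G XGXS front end, the external
 * PHY type determines the base capability set, and speed_cap_mask then
 * removes the speeds the board does not support.
 */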
7474static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7475{
7476 int port = bp->port;
7477 u32 ext_phy_type;
7478
7479 bp->phy_flags = 0;
7480
7481 switch (switch_cfg) {
7482 case SWITCH_CFG_1G:
7483 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7484
7485 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7486 switch (ext_phy_type) {
7487 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7488 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7489 ext_phy_type);
7490
7491 bp->supported |= (SUPPORTED_10baseT_Half |
7492 SUPPORTED_10baseT_Full |
7493 SUPPORTED_100baseT_Half |
7494 SUPPORTED_100baseT_Full |
7495 SUPPORTED_1000baseT_Full |
Eliezer Tamirf1410642008-02-28 11:51:50 -08007496 SUPPORTED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007497 SUPPORTED_TP | SUPPORTED_FIBRE |
7498 SUPPORTED_Autoneg |
7499 SUPPORTED_Pause |
7500 SUPPORTED_Asym_Pause);
7501 break;
7502
7503 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7504 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7505 ext_phy_type);
7506
7507 bp->phy_flags |= PHY_SGMII_FLAG;
7508
Eliezer Tamirf1410642008-02-28 11:51:50 -08007509 bp->supported |= (SUPPORTED_10baseT_Half |
7510 SUPPORTED_10baseT_Full |
7511 SUPPORTED_100baseT_Half |
7512 SUPPORTED_100baseT_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007513 SUPPORTED_1000baseT_Full |
7514 SUPPORTED_TP | SUPPORTED_FIBRE |
7515 SUPPORTED_Autoneg |
7516 SUPPORTED_Pause |
7517 SUPPORTED_Asym_Pause);
7518 break;
7519
7520 default:
7521 BNX2X_ERR("NVRAM config error. "
7522 "BAD SerDes ext_phy_config 0x%x\n",
7523 bp->ext_phy_config);
7524 return;
7525 }
7526
7527 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7528 port*0x10);
7529 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7530 break;
7531
7532 case SWITCH_CFG_10G:
7533 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7534
7535 bp->phy_flags |= PHY_XGXS_FLAG;
7536
7537 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7538 switch (ext_phy_type) {
7539 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7540 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7541 ext_phy_type);
7542
7543 bp->supported |= (SUPPORTED_10baseT_Half |
7544 SUPPORTED_10baseT_Full |
7545 SUPPORTED_100baseT_Half |
7546 SUPPORTED_100baseT_Full |
7547 SUPPORTED_1000baseT_Full |
Eliezer Tamirf1410642008-02-28 11:51:50 -08007548 SUPPORTED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007549 SUPPORTED_10000baseT_Full |
7550 SUPPORTED_TP | SUPPORTED_FIBRE |
7551 SUPPORTED_Autoneg |
7552 SUPPORTED_Pause |
7553 SUPPORTED_Asym_Pause);
7554 break;
7555
7556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007557 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7558 ext_phy_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007559
7560 bp->supported |= (SUPPORTED_10000baseT_Full |
7561 SUPPORTED_FIBRE |
7562 SUPPORTED_Pause |
7563 SUPPORTED_Asym_Pause);
7564 break;
7565
Eliezer Tamirf1410642008-02-28 11:51:50 -08007566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7567 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7568 ext_phy_type);
7569
7570 bp->supported |= (SUPPORTED_10000baseT_Full |
7571 SUPPORTED_1000baseT_Full |
7572 SUPPORTED_Autoneg |
7573 SUPPORTED_FIBRE |
7574 SUPPORTED_Pause |
7575 SUPPORTED_Asym_Pause);
7576 break;
7577
7578 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7579 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7580 ext_phy_type);
7581
7582 bp->supported |= (SUPPORTED_10000baseT_Full |
7583 SUPPORTED_1000baseT_Full |
7584 SUPPORTED_FIBRE |
7585 SUPPORTED_Autoneg |
7586 SUPPORTED_Pause |
7587 SUPPORTED_Asym_Pause);
7588 break;
7589
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7591 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7592 ext_phy_type);
7593
7594 bp->supported |= (SUPPORTED_10000baseT_Full |
7595 SUPPORTED_TP |
7596 SUPPORTED_Autoneg |
7597 SUPPORTED_Pause |
7598 SUPPORTED_Asym_Pause);
7599 break;
7600
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007601 default:
7602 BNX2X_ERR("NVRAM config error. "
7603 "BAD XGXS ext_phy_config 0x%x\n",
7604 bp->ext_phy_config);
7605 return;
7606 }
7607
7608 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7609 port*0x18);
7610 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7611
7612 bp->ser_lane = ((bp->lane_config &
7613 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7614 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7615 bp->rx_lane_swap = ((bp->lane_config &
7616 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7617 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7618 bp->tx_lane_swap = ((bp->lane_config &
7619 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7620 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7621 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7622 bp->rx_lane_swap, bp->tx_lane_swap);
7623 break;
7624
7625 default:
7626 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7627 bp->link_config);
7628 return;
7629 }
7630
7631 /* mask what we support according to speed_cap_mask */
7632 if (!(bp->speed_cap_mask &
7633 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7634 bp->supported &= ~SUPPORTED_10baseT_Half;
7635
7636 if (!(bp->speed_cap_mask &
7637 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7638 bp->supported &= ~SUPPORTED_10baseT_Full;
7639
7640 if (!(bp->speed_cap_mask &
7641 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7642 bp->supported &= ~SUPPORTED_100baseT_Half;
7643
7644 if (!(bp->speed_cap_mask &
7645 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7646 bp->supported &= ~SUPPORTED_100baseT_Full;
7647
7648 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7649 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7650 SUPPORTED_1000baseT_Full);
7651
7652 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Eliezer Tamirf1410642008-02-28 11:51:50 -08007653 bp->supported &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007654
7655 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7656 bp->supported &= ~SUPPORTED_10000baseT_Full;
7657
7658 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7659}
7660
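/* Translate the NVRAM link_config into the requested link parameters.
 * For example, PORT_FEATURE_LINK_SPEED_AUTO with autoneg supported sets
 * AUTONEG_SPEED and advertises everything in bp->supported, while
 * PORT_FEATURE_LINK_SPEED_10G_CX4/KX4/KR forces req_line_speed to
 * SPEED_10000.  Flow control is taken from link_config as well, with
 * AUTONEG_FLOW_CTRL set only when FLOW_CTRL_AUTO is configured and
 * autoneg is supported.
 */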
7661static void bnx2x_link_settings_requested(struct bnx2x *bp)
7662{
7663 bp->req_autoneg = 0;
7664 bp->req_duplex = DUPLEX_FULL;
7665
7666 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7667 case PORT_FEATURE_LINK_SPEED_AUTO:
7668 if (bp->supported & SUPPORTED_Autoneg) {
7669 bp->req_autoneg |= AUTONEG_SPEED;
7670 bp->req_line_speed = 0;
7671 bp->advertising = bp->supported;
7672 } else {
Eliezer Tamirf1410642008-02-28 11:51:50 -08007673 if (XGXS_EXT_PHY_TYPE(bp) ==
7674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007675 /* force 10G, no AN */
7676 bp->req_line_speed = SPEED_10000;
7677 bp->advertising =
7678 (ADVERTISED_10000baseT_Full |
7679 ADVERTISED_FIBRE);
7680 break;
7681 }
7682 BNX2X_ERR("NVRAM config error. "
7683 "Invalid link_config 0x%x"
7684 " Autoneg not supported\n",
7685 bp->link_config);
7686 return;
7687 }
7688 break;
7689
7690 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007691 if (bp->supported & SUPPORTED_10baseT_Full) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007692 bp->req_line_speed = SPEED_10;
7693 bp->advertising = (ADVERTISED_10baseT_Full |
7694 ADVERTISED_TP);
7695 } else {
7696 BNX2X_ERR("NVRAM config error. "
7697 "Invalid link_config 0x%x"
7698 " speed_cap_mask 0x%x\n",
7699 bp->link_config, bp->speed_cap_mask);
7700 return;
7701 }
7702 break;
7703
7704 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007705 if (bp->supported & SUPPORTED_10baseT_Half) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007706 bp->req_line_speed = SPEED_10;
7707 bp->req_duplex = DUPLEX_HALF;
7708 bp->advertising = (ADVERTISED_10baseT_Half |
7709 ADVERTISED_TP);
7710 } else {
7711 BNX2X_ERR("NVRAM config error. "
7712 "Invalid link_config 0x%x"
7713 " speed_cap_mask 0x%x\n",
7714 bp->link_config, bp->speed_cap_mask);
7715 return;
7716 }
7717 break;
7718
7719 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007720 if (bp->supported & SUPPORTED_100baseT_Full) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007721 bp->req_line_speed = SPEED_100;
7722 bp->advertising = (ADVERTISED_100baseT_Full |
7723 ADVERTISED_TP);
7724 } else {
7725 BNX2X_ERR("NVRAM config error. "
7726 "Invalid link_config 0x%x"
7727 " speed_cap_mask 0x%x\n",
7728 bp->link_config, bp->speed_cap_mask);
7729 return;
7730 }
7731 break;
7732
7733 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007734 if (bp->supported & SUPPORTED_100baseT_Half) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007735 bp->req_line_speed = SPEED_100;
7736 bp->req_duplex = DUPLEX_HALF;
7737 bp->advertising = (ADVERTISED_100baseT_Half |
7738 ADVERTISED_TP);
7739 } else {
7740 BNX2X_ERR("NVRAM config error. "
7741 "Invalid link_config 0x%x"
7742 " speed_cap_mask 0x%x\n",
7743 bp->link_config, bp->speed_cap_mask);
7744 return;
7745 }
7746 break;
7747
7748 case PORT_FEATURE_LINK_SPEED_1G:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007749 if (bp->supported & SUPPORTED_1000baseT_Full) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007750 bp->req_line_speed = SPEED_1000;
7751 bp->advertising = (ADVERTISED_1000baseT_Full |
7752 ADVERTISED_TP);
7753 } else {
7754 BNX2X_ERR("NVRAM config error. "
7755 "Invalid link_config 0x%x"
7756 " speed_cap_mask 0x%x\n",
7757 bp->link_config, bp->speed_cap_mask);
7758 return;
7759 }
7760 break;
7761
7762 case PORT_FEATURE_LINK_SPEED_2_5G:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007763 if (bp->supported & SUPPORTED_2500baseX_Full) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007764 bp->req_line_speed = SPEED_2500;
Eliezer Tamirf1410642008-02-28 11:51:50 -08007765 bp->advertising = (ADVERTISED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007766 ADVERTISED_TP);
7767 } else {
7768 BNX2X_ERR("NVRAM config error. "
7769 "Invalid link_config 0x%x"
7770 " speed_cap_mask 0x%x\n",
7771 bp->link_config, bp->speed_cap_mask);
7772 return;
7773 }
7774 break;
7775
7776 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7777 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7778 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eliezer Tamirf1410642008-02-28 11:51:50 -08007779 if (bp->supported & SUPPORTED_10000baseT_Full) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007780 bp->req_line_speed = SPEED_10000;
7781 bp->advertising = (ADVERTISED_10000baseT_Full |
7782 ADVERTISED_FIBRE);
7783 } else {
7784 BNX2X_ERR("NVRAM config error. "
7785 "Invalid link_config 0x%x"
7786 " speed_cap_mask 0x%x\n",
7787 bp->link_config, bp->speed_cap_mask);
7788 return;
7789 }
7790 break;
7791
7792 default:
7793 BNX2X_ERR("NVRAM config error. "
7794 "BAD link speed link_config 0x%x\n",
7795 bp->link_config);
7796 bp->req_autoneg |= AUTONEG_SPEED;
7797 bp->req_line_speed = 0;
7798 bp->advertising = bp->supported;
7799 break;
7800 }
7801 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7802 bp->req_line_speed, bp->req_duplex);
7803
7804 bp->req_flow_ctrl = (bp->link_config &
7805 PORT_FEATURE_FLOW_CONTROL_MASK);
Eliezer Tamirf1410642008-02-28 11:51:50 -08007806 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7807 (bp->supported & SUPPORTED_Autoneg))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007808 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007809
Eliezer Tamirf1410642008-02-28 11:51:50 -08007810 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7811 " advertising 0x%x\n",
7812 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007813}
7814
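/* Read the per-port configuration from the chip and from shared memory:
 * chip id, MCP presence, serdes/lane/external-PHY config, speed
 * capability mask, link config, MAC address, part number, bootcode
 * version and flash size.  If the MCP is not active (emulation/FPGA)
 * the driver falls back to nomcp mode and a random MAC address.
 */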
7815static void bnx2x_get_hwinfo(struct bnx2x *bp)
7816{
7817 u32 val, val2, val3, val4, id;
7818 int port = bp->port;
7819 u32 switch_cfg;
7820
7821 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7822 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7823
7824 /* Get the chip revision id and number. */
7825 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7826 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7827 id = ((val & 0xffff) << 16);
7828 val = REG_RD(bp, MISC_REG_CHIP_REV);
7829 id |= ((val & 0xf) << 12);
7830 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7831 id |= ((val & 0xff) << 4);
7832 	val = REG_RD(bp, MISC_REG_BOND_ID);
7833 id |= (val & 0xf);
7834 bp->chip_id = id;
7835 BNX2X_DEV_INFO("chip ID is %x\n", id);
7836
7837 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7838 BNX2X_DEV_INFO("MCP not active\n");
7839 nomcp = 1;
7840 goto set_mac;
7841 }
7842
7843 val = SHMEM_RD(bp, validity_map[port]);
7844 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Eliezer Tamirf1410642008-02-28 11:51:50 -08007845 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7846 BNX2X_ERR("BAD MCP validity signature\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007847
Eliezer Tamirf1410642008-02-28 11:51:50 -08007848 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007849 DRV_MSG_SEQ_NUMBER_MASK);
7850
7851 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08007852 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007853 bp->serdes_config =
Eliezer Tamirf1410642008-02-28 11:51:50 -08007854 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007855 bp->lane_config =
7856 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7857 bp->ext_phy_config =
7858 SHMEM_RD(bp,
7859 dev_info.port_hw_config[port].external_phy_config);
7860 bp->speed_cap_mask =
7861 SHMEM_RD(bp,
7862 dev_info.port_hw_config[port].speed_capability_mask);
7863
7864 bp->link_config =
7865 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7866
Eliezer Tamirf1410642008-02-28 11:51:50 -08007867 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007868 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7869 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7870 " fw_seq (%08x)\n",
Eliezer Tamirf1410642008-02-28 11:51:50 -08007871 bp->hw_config, bp->board, bp->serdes_config,
7872 bp->lane_config, bp->ext_phy_config,
7873 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007874
7875 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7876 bnx2x_link_settings_supported(bp, switch_cfg);
7877
7878 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7879 /* for now disable cl73 */
7880 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7881 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7882
7883 bnx2x_link_settings_requested(bp);
7884
7885 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7886 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7887 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7888 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7889 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7890 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7891 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7892 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7893
7894 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7895
7896
7897 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7898 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7899 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7900 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7901
7902 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7903 val, val2, val3, val4);
7904
7905 /* bc ver */
7906 if (!nomcp) {
7907 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7908 BNX2X_DEV_INFO("bc_ver %X\n", val);
7909 if (val < BNX2X_BC_VER) {
7910 /* for now only warn
7911 * later we might need to enforce this */
7912 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7913 " please upgrade BC\n", BNX2X_BC_VER, val);
7914 }
7915 } else {
7916 bp->bc_ver = 0;
7917 }
7918
7919 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7920 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7921 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7922 bp->flash_size, bp->flash_size);
7923
7924 return;
7925
7926set_mac: /* only supposed to happen on emulation/FPGA */
7927 	BNX2X_ERR("warning: random MAC workaround active\n");
7928 random_ether_addr(bp->dev->dev_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007929 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7930
7931}
7932
7933/*
7934 * ethtool service functions
7935 */
7936
7937/* All ethtool functions called with rtnl_lock */
7938
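/* Report the current link parameters to ethtool.  While the carrier is
 * up the actual negotiated speed/duplex are returned; otherwise the
 * requested values are reported.  The port type is derived from the
 * external PHY, e.g. a typical "ethtool ethX" on a fibre board with the
 * 10G link up would show Speed: 10000Mb/s, Duplex: Full, Port: FIBRE.
 */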
7939static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7940{
7941 struct bnx2x *bp = netdev_priv(dev);
7942
7943 cmd->supported = bp->supported;
7944 cmd->advertising = bp->advertising;
7945
7946 if (netif_carrier_ok(dev)) {
7947 cmd->speed = bp->line_speed;
7948 cmd->duplex = bp->duplex;
7949 } else {
7950 cmd->speed = bp->req_line_speed;
7951 cmd->duplex = bp->req_duplex;
7952 }
7953
7954 if (bp->phy_flags & PHY_XGXS_FLAG) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08007955 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7956
7957 switch (ext_phy_type) {
7958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7962 cmd->port = PORT_FIBRE;
7963 break;
7964
7965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7966 cmd->port = PORT_TP;
7967 break;
7968
7969 default:
7970 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7971 bp->ext_phy_config);
7972 }
7973 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007974 cmd->port = PORT_TP;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007975
7976 cmd->phy_address = bp->phy_addr;
7977 cmd->transceiver = XCVR_INTERNAL;
7978
Eliezer Tamirf1410642008-02-28 11:51:50 -08007979 if (bp->req_autoneg & AUTONEG_SPEED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007980 cmd->autoneg = AUTONEG_ENABLE;
Eliezer Tamirf1410642008-02-28 11:51:50 -08007981 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007982 cmd->autoneg = AUTONEG_DISABLE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007983
7984 cmd->maxtxpkt = 0;
7985 cmd->maxrxpkt = 0;
7986
7987 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7988 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7989 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7990 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7991 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7992 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7993 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7994
7995 return 0;
7996}
7997
7998static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7999{
8000 struct bnx2x *bp = netdev_priv(dev);
8001 u32 advertising;
8002
8003 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8004 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8005 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8006 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8007 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8008 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8009 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8010
8011 switch (cmd->port) {
8012 case PORT_TP:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008013 if (!(bp->supported & SUPPORTED_TP)) {
8014 DP(NETIF_MSG_LINK, "TP not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008015 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008016 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008017
8018 if (bp->phy_flags & PHY_XGXS_FLAG) {
8019 bnx2x_link_reset(bp);
8020 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
8021 bnx2x_phy_deassert(bp);
8022 }
8023 break;
8024
8025 case PORT_FIBRE:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008026 if (!(bp->supported & SUPPORTED_FIBRE)) {
8027 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008028 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008029 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008030
8031 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
8032 bnx2x_link_reset(bp);
8033 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
8034 bnx2x_phy_deassert(bp);
8035 }
8036 break;
8037
8038 default:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008039 DP(NETIF_MSG_LINK, "Unknown port type\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008040 return -EINVAL;
8041 }
8042
8043 if (cmd->autoneg == AUTONEG_ENABLE) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08008044 if (!(bp->supported & SUPPORTED_Autoneg)) {
8045 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008046 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008047 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008048
8049 /* advertise the requested speed and duplex if supported */
8050 cmd->advertising &= bp->supported;
8051
8052 bp->req_autoneg |= AUTONEG_SPEED;
8053 bp->req_line_speed = 0;
8054 bp->req_duplex = DUPLEX_FULL;
8055 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
8056
8057 } else { /* forced speed */
8058 /* advertise the requested speed and duplex if supported */
8059 switch (cmd->speed) {
8060 case SPEED_10:
8061 if (cmd->duplex == DUPLEX_FULL) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08008062 if (!(bp->supported &
8063 SUPPORTED_10baseT_Full)) {
8064 DP(NETIF_MSG_LINK,
8065 "10M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008066 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008067 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008068
8069 advertising = (ADVERTISED_10baseT_Full |
8070 ADVERTISED_TP);
8071 } else {
Eliezer Tamirf1410642008-02-28 11:51:50 -08008072 if (!(bp->supported &
8073 SUPPORTED_10baseT_Half)) {
8074 DP(NETIF_MSG_LINK,
8075 "10M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008076 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008077 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008078
8079 advertising = (ADVERTISED_10baseT_Half |
8080 ADVERTISED_TP);
8081 }
8082 break;
8083
8084 case SPEED_100:
8085 if (cmd->duplex == DUPLEX_FULL) {
8086 if (!(bp->supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08008087 SUPPORTED_100baseT_Full)) {
8088 DP(NETIF_MSG_LINK,
8089 "100M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008090 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008091 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008092
8093 advertising = (ADVERTISED_100baseT_Full |
8094 ADVERTISED_TP);
8095 } else {
8096 if (!(bp->supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08008097 SUPPORTED_100baseT_Half)) {
8098 DP(NETIF_MSG_LINK,
8099 "100M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008100 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008101 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008102
8103 advertising = (ADVERTISED_100baseT_Half |
8104 ADVERTISED_TP);
8105 }
8106 break;
8107
8108 case SPEED_1000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008109 if (cmd->duplex != DUPLEX_FULL) {
8110 DP(NETIF_MSG_LINK, "1G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008111 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008112 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008113
Eliezer Tamirf1410642008-02-28 11:51:50 -08008114 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8115 DP(NETIF_MSG_LINK, "1G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008116 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008117 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008118
8119 advertising = (ADVERTISED_1000baseT_Full |
8120 ADVERTISED_TP);
8121 break;
8122
8123 case SPEED_2500:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008124 if (cmd->duplex != DUPLEX_FULL) {
8125 DP(NETIF_MSG_LINK,
8126 "2.5G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008127 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008128 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008129
Eliezer Tamirf1410642008-02-28 11:51:50 -08008130 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8131 DP(NETIF_MSG_LINK,
8132 "2.5G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008133 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008134 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008135
Eliezer Tamirf1410642008-02-28 11:51:50 -08008136 advertising = (ADVERTISED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008137 ADVERTISED_TP);
8138 break;
8139
8140 case SPEED_10000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008141 if (cmd->duplex != DUPLEX_FULL) {
8142 DP(NETIF_MSG_LINK, "10G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008143 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008144 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008145
Eliezer Tamirf1410642008-02-28 11:51:50 -08008146 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8147 DP(NETIF_MSG_LINK, "10G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008148 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08008149 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008150
8151 advertising = (ADVERTISED_10000baseT_Full |
8152 ADVERTISED_FIBRE);
8153 break;
8154
8155 default:
Eliezer Tamirf1410642008-02-28 11:51:50 -08008156 DP(NETIF_MSG_LINK, "Unsupported speed\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008157 return -EINVAL;
8158 }
8159
8160 bp->req_autoneg &= ~AUTONEG_SPEED;
8161 bp->req_line_speed = cmd->speed;
8162 bp->req_duplex = cmd->duplex;
8163 bp->advertising = advertising;
8164 }
8165
8166 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8167 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8168 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8169 bp->advertising);
8170
8171 bnx2x_stop_stats(bp);
8172 bnx2x_link_initialize(bp);
8173
8174 return 0;
8175}
8176
8177static void bnx2x_get_drvinfo(struct net_device *dev,
8178 struct ethtool_drvinfo *info)
8179{
8180 struct bnx2x *bp = netdev_priv(dev);
8181
8182 strcpy(info->driver, DRV_MODULE_NAME);
8183 strcpy(info->version, DRV_MODULE_VERSION);
8184 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8185 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8186 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8187 bp->bc_ver);
8188 strcpy(info->bus_info, pci_name(bp->pdev));
8189 info->n_stats = BNX2X_NUM_STATS;
8190 info->testinfo_len = BNX2X_NUM_TESTS;
8191 info->eedump_len = bp->flash_size;
8192 info->regdump_len = 0;
8193}
8194
8195static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8196{
8197 struct bnx2x *bp = netdev_priv(dev);
8198
8199 if (bp->flags & NO_WOL_FLAG) {
8200 wol->supported = 0;
8201 wol->wolopts = 0;
8202 } else {
8203 wol->supported = WAKE_MAGIC;
8204 if (bp->wol)
8205 wol->wolopts = WAKE_MAGIC;
8206 else
8207 wol->wolopts = 0;
8208 }
8209 memset(&wol->sopass, 0, sizeof(wol->sopass));
8210}
8211
8212static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8213{
8214 struct bnx2x *bp = netdev_priv(dev);
8215
8216 if (wol->wolopts & ~WAKE_MAGIC)
8217 return -EINVAL;
8218
8219 if (wol->wolopts & WAKE_MAGIC) {
8220 if (bp->flags & NO_WOL_FLAG)
8221 return -EINVAL;
8222
8223 bp->wol = 1;
8224 } else {
8225 bp->wol = 0;
8226 }
8227 return 0;
8228}
8229
8230static u32 bnx2x_get_msglevel(struct net_device *dev)
8231{
8232 struct bnx2x *bp = netdev_priv(dev);
8233
8234 return bp->msglevel;
8235}
8236
8237static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8238{
8239 struct bnx2x *bp = netdev_priv(dev);
8240
8241 if (capable(CAP_NET_ADMIN))
8242 bp->msglevel = level;
8243}
8244
8245static int bnx2x_nway_reset(struct net_device *dev)
8246{
8247 struct bnx2x *bp = netdev_priv(dev);
8248
8249 if (bp->state != BNX2X_STATE_OPEN) {
8250 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8251 return -EAGAIN;
8252 }
8253
8254 bnx2x_stop_stats(bp);
8255 bnx2x_link_initialize(bp);
8256
8257 return 0;
8258}
8259
8260static int bnx2x_get_eeprom_len(struct net_device *dev)
8261{
8262 struct bnx2x *bp = netdev_priv(dev);
8263
8264 return bp->flash_size;
8265}
8266
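/* NVRAM access protocol: each port first grabs the software arbitration
 * bit in MCP_REG_MCPR_NVM_SW_ARB (released again in
 * bnx2x_release_nvram_lock()), then enables the NVRAM interface and
 * issues dword-at-a-time commands flagged FIRST/LAST/DOIT, polling the
 * DONE bit for completion.  All timeouts are multiplied by 100 on
 * emulation/FPGA where the chip runs much slower.
 */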
8267static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8268{
8269 int port = bp->port;
8270 int count, i;
8271 u32 val = 0;
8272
8273 /* adjust timeout for emulation/FPGA */
8274 count = NVRAM_TIMEOUT_COUNT;
8275 if (CHIP_REV_IS_SLOW(bp))
8276 count *= 100;
8277
8278 /* request access to nvram interface */
8279 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8280 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8281
8282 for (i = 0; i < count*10; i++) {
8283 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8284 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8285 break;
8286
8287 udelay(5);
8288 }
8289
8290 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8291 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8292 return -EBUSY;
8293 }
8294
8295 return 0;
8296}
8297
8298static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8299{
8300 int port = bp->port;
8301 int count, i;
8302 u32 val = 0;
8303
8304 /* adjust timeout for emulation/FPGA */
8305 count = NVRAM_TIMEOUT_COUNT;
8306 if (CHIP_REV_IS_SLOW(bp))
8307 count *= 100;
8308
8309 /* relinquish nvram interface */
8310 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8311 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8312
8313 for (i = 0; i < count*10; i++) {
8314 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8315 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8316 break;
8317
8318 udelay(5);
8319 }
8320
8321 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8322 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8323 return -EBUSY;
8324 }
8325
8326 return 0;
8327}
8328
8329static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8330{
8331 u32 val;
8332
8333 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8334
8335 /* enable both bits, even on read */
8336 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8337 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8338 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8339}
8340
8341static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8342{
8343 u32 val;
8344
8345 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8346
8347 /* disable both bits, even after read */
8348 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8349 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8350 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8351}
8352
8353static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8354 u32 cmd_flags)
8355{
Eliezer Tamirf1410642008-02-28 11:51:50 -08008356 int count, i, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008357 u32 val;
8358
8359 /* build the command word */
8360 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8361
8362 /* need to clear DONE bit separately */
8363 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8364
8365 /* address of the NVRAM to read from */
8366 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8367 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8368
8369 /* issue a read command */
8370 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8371
8372 /* adjust timeout for emulation/FPGA */
8373 count = NVRAM_TIMEOUT_COUNT;
8374 if (CHIP_REV_IS_SLOW(bp))
8375 count *= 100;
8376
8377 /* wait for completion */
8378 *ret_val = 0;
8379 rc = -EBUSY;
8380 for (i = 0; i < count; i++) {
8381 udelay(5);
8382 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8383
8384 if (val & MCPR_NVM_COMMAND_DONE) {
8385 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8386 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8387 			/* we read nvram data in cpu order,
8388 			 * but ethtool sees it as an array of bytes;
8389 			 * converting to big-endian does the work */
8390 val = cpu_to_be32(val);
8391 *ret_val = val;
8392 rc = 0;
8393 break;
8394 }
8395 }
8396
8397 return rc;
8398}
8399
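/* Read an arbitrary (dword-aligned) range from NVRAM.  The dwords come
 * back from bnx2x_nvram_read_dword() already converted with
 * cpu_to_be32(), so the memcpy() below fills the caller's buffer as a
 * plain byte stream: a dword that reads as 0x000200fe from the NVM data
 * register, for instance, ends up as the bytes 00 02 00 fe in ret_buf.
 */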
8400static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8401 int buf_size)
8402{
8403 int rc;
8404 u32 cmd_flags;
8405 u32 val;
8406
8407 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8408 DP(NETIF_MSG_NVM,
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008409 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008410 offset, buf_size);
8411 return -EINVAL;
8412 }
8413
8414 if (offset + buf_size > bp->flash_size) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008415 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008416 " buf_size (0x%x) > flash_size (0x%x)\n",
8417 offset, buf_size, bp->flash_size);
8418 return -EINVAL;
8419 }
8420
8421 /* request access to nvram interface */
8422 rc = bnx2x_acquire_nvram_lock(bp);
8423 if (rc)
8424 return rc;
8425
8426 /* enable access to nvram interface */
8427 bnx2x_enable_nvram_access(bp);
8428
8429 /* read the first word(s) */
8430 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8431 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8432 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8433 memcpy(ret_buf, &val, 4);
8434
8435 /* advance to the next dword */
8436 offset += sizeof(u32);
8437 ret_buf += sizeof(u32);
8438 buf_size -= sizeof(u32);
8439 cmd_flags = 0;
8440 }
8441
8442 if (rc == 0) {
8443 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8444 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8445 memcpy(ret_buf, &val, 4);
8446 }
8447
8448 /* disable access to nvram interface */
8449 bnx2x_disable_nvram_access(bp);
8450 bnx2x_release_nvram_lock(bp);
8451
8452 return rc;
8453}
8454
8455static int bnx2x_get_eeprom(struct net_device *dev,
8456 struct ethtool_eeprom *eeprom, u8 *eebuf)
8457{
8458 struct bnx2x *bp = netdev_priv(dev);
8459 int rc;
8460
8461 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8462 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8463 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8464 eeprom->len, eeprom->len);
8465
8466 /* parameters already validated in ethtool_get_eeprom */
8467
8468 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8469
8470 return rc;
8471}
8472
8473static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8474 u32 cmd_flags)
8475{
Eliezer Tamirf1410642008-02-28 11:51:50 -08008476 int count, i, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008477
8478 /* build the command word */
8479 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8480
8481 /* need to clear DONE bit separately */
8482 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8483
8484 /* write the data */
8485 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8486
8487 /* address of the NVRAM to write to */
8488 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8489 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8490
8491 /* issue the write command */
8492 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8493
8494 /* adjust timeout for emulation/FPGA */
8495 count = NVRAM_TIMEOUT_COUNT;
8496 if (CHIP_REV_IS_SLOW(bp))
8497 count *= 100;
8498
8499 /* wait for completion */
8500 rc = -EBUSY;
8501 for (i = 0; i < count; i++) {
8502 udelay(5);
8503 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8504 if (val & MCPR_NVM_COMMAND_DONE) {
8505 rc = 0;
8506 break;
8507 }
8508 }
8509
8510 return rc;
8511}
8512
Eliezer Tamirf1410642008-02-28 11:51:50 -08008513#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008514
8515static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8516 int buf_size)
8517{
8518 int rc;
8519 u32 cmd_flags;
8520 u32 align_offset;
8521 u32 val;
8522
8523 if (offset + buf_size > bp->flash_size) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008524 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008525 " buf_size (0x%x) > flash_size (0x%x)\n",
8526 offset, buf_size, bp->flash_size);
8527 return -EINVAL;
8528 }
8529
8530 /* request access to nvram interface */
8531 rc = bnx2x_acquire_nvram_lock(bp);
8532 if (rc)
8533 return rc;
8534
8535 /* enable access to nvram interface */
8536 bnx2x_enable_nvram_access(bp);
8537
8538 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8539 align_offset = (offset & ~0x03);
8540 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8541
8542 if (rc == 0) {
8543 val &= ~(0xff << BYTE_OFFSET(offset));
8544 val |= (*data_buf << BYTE_OFFSET(offset));
8545
8546 /* nvram data is returned as an array of bytes
8547 * convert it back to cpu order */
8548 val = be32_to_cpu(val);
8549
8550 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8551
8552 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8553 cmd_flags);
8554 }
8555
8556 /* disable access to nvram interface */
8557 bnx2x_disable_nvram_access(bp);
8558 bnx2x_release_nvram_lock(bp);
8559
8560 return rc;
8561}
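/* Editor's note (illustrative sketch, not part of the driver): the single-byte
 * write above is a read-modify-write of the aligned dword, with BYTE_OFFSET()
 * selecting the bit position of the addressed byte.  The helper name below is
 * hypothetical; it only restates that masking arithmetic.
 */
static inline u32 example_merge_nvram_byte(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));	/* clear the addressed byte */
	dword |= ((u32)byte << BYTE_OFFSET(offset));	/* insert the new value */
	return dword;
}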
8562
8563static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8564 int buf_size)
8565{
8566 int rc;
8567 u32 cmd_flags;
8568 u32 val;
8569 u32 written_so_far;
8570
8571 if (buf_size == 1) { /* ethtool */
8572 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8573 }
8574
8575 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8576 DP(NETIF_MSG_NVM,
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008577 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008578 offset, buf_size);
8579 return -EINVAL;
8580 }
8581
8582 if (offset + buf_size > bp->flash_size) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008583 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008584 " buf_size (0x%x) > flash_size (0x%x)\n",
8585 offset, buf_size, bp->flash_size);
8586 return -EINVAL;
8587 }
8588
8589 /* request access to nvram interface */
8590 rc = bnx2x_acquire_nvram_lock(bp);
8591 if (rc)
8592 return rc;
8593
8594 /* enable access to nvram interface */
8595 bnx2x_enable_nvram_access(bp);
8596
8597 written_so_far = 0;
8598 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8599 while ((written_so_far < buf_size) && (rc == 0)) {
8600 if (written_so_far == (buf_size - sizeof(u32)))
8601 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8602 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8603 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8604 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8605 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8606
8607 memcpy(&val, data_buf, 4);
8608 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8609
8610 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8611
8612 /* advance to the next dword */
8613 offset += sizeof(u32);
8614 data_buf += sizeof(u32);
8615 written_so_far += sizeof(u32);
8616 cmd_flags = 0;
8617 }
8618
8619 /* disable access to nvram interface */
8620 bnx2x_disable_nvram_access(bp);
8621 bnx2x_release_nvram_lock(bp);
8622
8623 return rc;
8624}
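/* Editor's note (illustrative sketch, not part of the driver): in the loop
 * above, LAST is set on the final dword of the buffer or of an NVRAM page and
 * FIRST on the first dword of the buffer or of a new page, so each page is
 * programmed as its own command sequence.  The hypothetical helper below only
 * restates that decision using the same MCPR_NVM_COMMAND_* bits.
 */
static inline u32 example_nvram_write_flags(u32 offset, u32 written,
					    u32 buf_size)
{
	u32 flags = 0;

	if (written == buf_size - sizeof(u32))
		flags |= MCPR_NVM_COMMAND_LAST;		/* final dword of the buffer */
	else if (((offset + sizeof(u32)) % NVRAM_PAGE_SIZE) == 0)
		flags |= MCPR_NVM_COMMAND_LAST;		/* final dword of this page */
	else if ((offset % NVRAM_PAGE_SIZE) == 0)
		flags |= MCPR_NVM_COMMAND_FIRST;	/* first dword of a new page */
	if (written == 0)
		flags |= MCPR_NVM_COMMAND_FIRST;	/* first dword of the buffer */
	return flags;
}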
8625
8626static int bnx2x_set_eeprom(struct net_device *dev,
8627 struct ethtool_eeprom *eeprom, u8 *eebuf)
8628{
8629 struct bnx2x *bp = netdev_priv(dev);
8630 int rc;
8631
8632 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8633 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8634 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8635 eeprom->len, eeprom->len);
8636
8637 /* parameters already validated in ethtool_set_eeprom */
8638
8639 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8640
8641 return rc;
8642}
8643
8644static int bnx2x_get_coalesce(struct net_device *dev,
8645 struct ethtool_coalesce *coal)
8646{
8647 struct bnx2x *bp = netdev_priv(dev);
8648
8649 memset(coal, 0, sizeof(struct ethtool_coalesce));
8650
8651 coal->rx_coalesce_usecs = bp->rx_ticks;
8652 coal->tx_coalesce_usecs = bp->tx_ticks;
8653 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8654
8655 return 0;
8656}
8657
8658static int bnx2x_set_coalesce(struct net_device *dev,
8659 struct ethtool_coalesce *coal)
8660{
8661 struct bnx2x *bp = netdev_priv(dev);
8662
8663 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8664 if (bp->rx_ticks > 3000)
8665 bp->rx_ticks = 3000;
8666
8667 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8668 if (bp->tx_ticks > 0x3000)
8669 bp->tx_ticks = 0x3000;
8670
8671 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8672 if (bp->stats_ticks > 0xffff00)
8673 bp->stats_ticks = 0xffff00;
8674 bp->stats_ticks &= 0xffff00;
8675
8676 if (netif_running(bp->dev))
8677 bnx2x_update_coalesce(bp);
8678
8679 return 0;
8680}
8681
8682static void bnx2x_get_ringparam(struct net_device *dev,
8683 struct ethtool_ringparam *ering)
8684{
8685 struct bnx2x *bp = netdev_priv(dev);
8686
8687 ering->rx_max_pending = MAX_RX_AVAIL;
8688 ering->rx_mini_max_pending = 0;
8689 ering->rx_jumbo_max_pending = 0;
8690
8691 ering->rx_pending = bp->rx_ring_size;
8692 ering->rx_mini_pending = 0;
8693 ering->rx_jumbo_pending = 0;
8694
8695 ering->tx_max_pending = MAX_TX_AVAIL;
8696 ering->tx_pending = bp->tx_ring_size;
8697}
8698
8699static int bnx2x_set_ringparam(struct net_device *dev,
8700 struct ethtool_ringparam *ering)
8701{
8702 struct bnx2x *bp = netdev_priv(dev);
8703
8704 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8705 (ering->tx_pending > MAX_TX_AVAIL) ||
8706 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8707 return -EINVAL;
8708
8709 bp->rx_ring_size = ering->rx_pending;
8710 bp->tx_ring_size = ering->tx_pending;
8711
8712 if (netif_running(bp->dev)) {
8713 bnx2x_nic_unload(bp, 0);
8714 bnx2x_nic_load(bp, 0);
8715 }
8716
8717 return 0;
8718}
8719
8720static void bnx2x_get_pauseparam(struct net_device *dev,
8721 struct ethtool_pauseparam *epause)
8722{
8723 struct bnx2x *bp = netdev_priv(dev);
8724
8725 epause->autoneg =
8726 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8727 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8728 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8729
8730 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8731 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8732 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8733}
8734
8735static int bnx2x_set_pauseparam(struct net_device *dev,
8736 struct ethtool_pauseparam *epause)
8737{
8738 struct bnx2x *bp = netdev_priv(dev);
8739
8740 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8741 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8742 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8743
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008744 if (epause->autoneg) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08008745 if (!(bp->supported & SUPPORTED_Autoneg)) {
8746 			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8747 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008748 }
8749
Eliezer Tamirf1410642008-02-28 11:51:50 -08008750 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8751 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008752 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8753
Eliezer Tamirf1410642008-02-28 11:51:50 -08008754 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008755
Eliezer Tamirf1410642008-02-28 11:51:50 -08008756 if (epause->rx_pause)
8757 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8758 if (epause->tx_pause)
8759 bp->req_flow_ctrl |= FLOW_CTRL_TX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008760
Eliezer Tamirf1410642008-02-28 11:51:50 -08008761 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8762 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8763 bp->req_flow_ctrl = FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008764
Eliezer Tamirf1410642008-02-28 11:51:50 -08008765 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8766 bp->req_autoneg, bp->req_flow_ctrl);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008767
8768 bnx2x_stop_stats(bp);
8769 bnx2x_link_initialize(bp);
8770
8771 return 0;
8772}
8773
8774static u32 bnx2x_get_rx_csum(struct net_device *dev)
8775{
8776 struct bnx2x *bp = netdev_priv(dev);
8777
8778 return bp->rx_csum;
8779}
8780
8781static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8782{
8783 struct bnx2x *bp = netdev_priv(dev);
8784
8785 bp->rx_csum = data;
8786 return 0;
8787}
8788
8789static int bnx2x_set_tso(struct net_device *dev, u32 data)
8790{
8791 if (data)
8792 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8793 else
8794 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8795 return 0;
8796}
8797
8798static struct {
8799 char string[ETH_GSTRING_LEN];
8800} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8801 { "MC Errors (online)" }
8802};
8803
8804static int bnx2x_self_test_count(struct net_device *dev)
8805{
8806 return BNX2X_NUM_TESTS;
8807}
8808
8809static void bnx2x_self_test(struct net_device *dev,
8810 struct ethtool_test *etest, u64 *buf)
8811{
8812 struct bnx2x *bp = netdev_priv(dev);
8813 int stats_state;
8814
8815 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8816
8817 if (bp->state != BNX2X_STATE_OPEN) {
8818 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8819 return;
8820 }
8821
8822 stats_state = bp->stats_state;
8823 bnx2x_stop_stats(bp);
8824
8825 if (bnx2x_mc_assert(bp) != 0) {
8826 buf[0] = 1;
8827 etest->flags |= ETH_TEST_FL_FAILED;
8828 }
8829
8830#ifdef BNX2X_EXTRA_DEBUG
8831 bnx2x_panic_dump(bp);
8832#endif
8833 bp->stats_state = stats_state;
8834}
8835
8836static struct {
8837 char string[ETH_GSTRING_LEN];
8838} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
Eliezer Tamir0e39e642008-02-28 11:54:03 -08008839 { "rx_bytes"},
8840 { "rx_error_bytes"},
8841 { "tx_bytes"},
8842 { "tx_error_bytes"},
8843 { "rx_ucast_packets"},
8844 { "rx_mcast_packets"},
8845 { "rx_bcast_packets"},
8846 { "tx_ucast_packets"},
8847 { "tx_mcast_packets"},
8848 { "tx_bcast_packets"},
8849 { "tx_mac_errors"}, /* 10 */
8850 { "tx_carrier_errors"},
8851 { "rx_crc_errors"},
8852 { "rx_align_errors"},
8853 { "tx_single_collisions"},
8854 { "tx_multi_collisions"},
8855 { "tx_deferred"},
8856 { "tx_excess_collisions"},
8857 { "tx_late_collisions"},
8858 { "tx_total_collisions"},
8859 { "rx_fragments"}, /* 20 */
8860 { "rx_jabbers"},
8861 { "rx_undersize_packets"},
8862 { "rx_oversize_packets"},
8863 { "rx_xon_frames"},
8864 { "rx_xoff_frames"},
8865 { "tx_xon_frames"},
8866 { "tx_xoff_frames"},
8867 { "rx_mac_ctrl_frames"},
8868 { "rx_filtered_packets"},
8869 { "rx_discards"}, /* 30 */
8870 { "brb_discard"},
8871 { "brb_truncate"},
8872 { "xxoverflow"}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008873};
8874
8875#define STATS_OFFSET32(offset_name) \
8876 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8877
8878static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
Eliezer Tamir0e39e642008-02-28 11:54:03 -08008879 STATS_OFFSET32(total_bytes_received_hi),
8880 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
8881 STATS_OFFSET32(total_bytes_transmitted_hi),
8882 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
8883 STATS_OFFSET32(total_unicast_packets_received_hi),
8884 STATS_OFFSET32(total_multicast_packets_received_hi),
8885 STATS_OFFSET32(total_broadcast_packets_received_hi),
8886 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8887 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8888 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8889 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8890 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
8891 STATS_OFFSET32(crc_receive_errors),
8892 STATS_OFFSET32(alignment_errors),
8893 STATS_OFFSET32(single_collision_transmit_frames),
8894 STATS_OFFSET32(multiple_collision_transmit_frames),
8895 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
8896 STATS_OFFSET32(excessive_collision_frames),
8897 STATS_OFFSET32(late_collision_frames),
8898 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
8899 STATS_OFFSET32(runt_packets_received), /* 20 */
8900 STATS_OFFSET32(jabber_packets_received),
8901 STATS_OFFSET32(error_runt_packets_received),
8902 STATS_OFFSET32(error_jabber_packets_received),
8903 STATS_OFFSET32(pause_xon_frames_received),
8904 STATS_OFFSET32(pause_xoff_frames_received),
8905 STATS_OFFSET32(pause_xon_frames_transmitted),
8906 STATS_OFFSET32(pause_xoff_frames_transmitted),
8907 STATS_OFFSET32(control_frames_received),
8908 STATS_OFFSET32(mac_filter_discard),
8909 STATS_OFFSET32(no_buff_discard), /* 30 */
8910 STATS_OFFSET32(brb_discard),
8911 STATS_OFFSET32(brb_truncate_discard),
8912 STATS_OFFSET32(xxoverflow_discard)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008913};
8914
8915static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8916 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8917 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8918 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
Eliezer Tamir0e39e642008-02-28 11:54:03 -08008919 4, 4, 4, 4
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008920};
8921
8922static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8923{
8924 switch (stringset) {
8925 case ETH_SS_STATS:
8926 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8927 break;
8928
8929 case ETH_SS_TEST:
8930 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8931 break;
8932 }
8933}
8934
8935static int bnx2x_get_stats_count(struct net_device *dev)
8936{
8937 return BNX2X_NUM_STATS;
8938}
8939
8940static void bnx2x_get_ethtool_stats(struct net_device *dev,
8941 struct ethtool_stats *stats, u64 *buf)
8942{
8943 struct bnx2x *bp = netdev_priv(dev);
8944 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8945 int i;
8946
8947 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8948 if (bnx2x_stats_len_arr[i] == 0) {
8949 /* skip this counter */
8950 buf[i] = 0;
8951 continue;
8952 }
8953 if (!hw_stats) {
8954 buf[i] = 0;
8955 continue;
8956 }
8957 if (bnx2x_stats_len_arr[i] == 4) {
8958 /* 4-byte counter */
8959 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8960 continue;
8961 }
8962 /* 8-byte counter */
8963 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8964 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8965 }
8966}
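/* Editor's note (illustrative sketch, not part of the driver): 8-byte counters
 * live in the stats block as two consecutive 32-bit words (hi then lo), and
 * the ethtool value is assembled as (hi << 32) | lo, which is essentially what
 * the HILO_U64() macro used above does.  The helper name is hypothetical.
 */
static inline u64 example_hilo_to_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}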
8967
8968static int bnx2x_phys_id(struct net_device *dev, u32 data)
8969{
8970 struct bnx2x *bp = netdev_priv(dev);
8971 int i;
8972
8973 if (data == 0)
8974 data = 2;
8975
8976 for (i = 0; i < (data * 2); i++) {
8977 if ((i % 2) == 0) {
8978 bnx2x_leds_set(bp, SPEED_1000);
8979 } else {
8980 bnx2x_leds_unset(bp);
8981 }
8982 msleep_interruptible(500);
8983 if (signal_pending(current))
8984 break;
8985 }
8986
8987 if (bp->link_up)
8988 bnx2x_leds_set(bp, bp->line_speed);
8989
8990 return 0;
8991}
8992
8993static struct ethtool_ops bnx2x_ethtool_ops = {
8994 .get_settings = bnx2x_get_settings,
8995 .set_settings = bnx2x_set_settings,
8996 .get_drvinfo = bnx2x_get_drvinfo,
8997 .get_wol = bnx2x_get_wol,
8998 .set_wol = bnx2x_set_wol,
8999 .get_msglevel = bnx2x_get_msglevel,
9000 .set_msglevel = bnx2x_set_msglevel,
9001 .nway_reset = bnx2x_nway_reset,
9002 .get_link = ethtool_op_get_link,
9003 .get_eeprom_len = bnx2x_get_eeprom_len,
9004 .get_eeprom = bnx2x_get_eeprom,
9005 .set_eeprom = bnx2x_set_eeprom,
9006 .get_coalesce = bnx2x_get_coalesce,
9007 .set_coalesce = bnx2x_set_coalesce,
9008 .get_ringparam = bnx2x_get_ringparam,
9009 .set_ringparam = bnx2x_set_ringparam,
9010 .get_pauseparam = bnx2x_get_pauseparam,
9011 .set_pauseparam = bnx2x_set_pauseparam,
9012 .get_rx_csum = bnx2x_get_rx_csum,
9013 .set_rx_csum = bnx2x_set_rx_csum,
9014 .get_tx_csum = ethtool_op_get_tx_csum,
9015 .set_tx_csum = ethtool_op_set_tx_csum,
9016 .get_sg = ethtool_op_get_sg,
9017 .set_sg = ethtool_op_set_sg,
9018 .get_tso = ethtool_op_get_tso,
9019 .set_tso = bnx2x_set_tso,
9020 .self_test_count = bnx2x_self_test_count,
9021 .self_test = bnx2x_self_test,
9022 .get_strings = bnx2x_get_strings,
9023 .phys_id = bnx2x_phys_id,
9024 .get_stats_count = bnx2x_get_stats_count,
9025 .get_ethtool_stats = bnx2x_get_ethtool_stats
9026};
9027
9028/* end of ethtool_ops */
9029
9030/****************************************************************************
9031* General service functions
9032****************************************************************************/
9033
9034static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9035{
9036 u16 pmcsr;
9037
9038 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9039
9040 switch (state) {
9041 case PCI_D0:
9042 pci_write_config_word(bp->pdev,
9043 bp->pm_cap + PCI_PM_CTRL,
9044 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9045 PCI_PM_CTRL_PME_STATUS));
9046
9047 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9048 /* delay required during transition out of D3hot */
9049 msleep(20);
9050 break;
9051
9052 case PCI_D3hot:
9053 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9054 pmcsr |= 3;
9055
9056 if (bp->wol)
9057 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9058
9059 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9060 pmcsr);
9061
9062 /* No more memory access after this point until
9063 * device is brought back to D0.
9064 */
9065 break;
9066
9067 default:
9068 return -EINVAL;
9069 }
9070 return 0;
9071}
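/* Editor's note (illustrative sketch, not part of the driver): the literal 3
 * written into pmcsr above is the D3hot encoding of the PM control state
 * field.  A driver that did not need the WoL special-casing could let the PCI
 * core do this; the hypothetical helper below shows the generic call.
 */
static inline int example_enter_d3hot(struct pci_dev *pdev)
{
	return pci_set_power_state(pdev, PCI_D3hot);
}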
9072
9073/*
9074 * net_device service functions
9075 */
9076
Eliezer Tamir49d66772008-02-28 11:53:13 -08009077/* called with netif_tx_lock from set_multicast */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009078static void bnx2x_set_rx_mode(struct net_device *dev)
9079{
9080 struct bnx2x *bp = netdev_priv(dev);
9081 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9082
9083 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
9084
9085 if (dev->flags & IFF_PROMISC)
9086 rx_mode = BNX2X_RX_MODE_PROMISC;
9087
9088 else if ((dev->flags & IFF_ALLMULTI) ||
9089 (dev->mc_count > BNX2X_MAX_MULTICAST))
9090 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9091
9092 else { /* some multicasts */
9093 int i, old, offset;
9094 struct dev_mc_list *mclist;
9095 struct mac_configuration_cmd *config =
9096 bnx2x_sp(bp, mcast_config);
9097
9098 for (i = 0, mclist = dev->mc_list;
9099 mclist && (i < dev->mc_count);
9100 i++, mclist = mclist->next) {
9101
9102 config->config_table[i].cam_entry.msb_mac_addr =
9103 swab16(*(u16 *)&mclist->dmi_addr[0]);
9104 config->config_table[i].cam_entry.middle_mac_addr =
9105 swab16(*(u16 *)&mclist->dmi_addr[2]);
9106 config->config_table[i].cam_entry.lsb_mac_addr =
9107 swab16(*(u16 *)&mclist->dmi_addr[4]);
9108 config->config_table[i].cam_entry.flags =
9109 cpu_to_le16(bp->port);
9110 config->config_table[i].target_table_entry.flags = 0;
9111 config->config_table[i].target_table_entry.
9112 client_id = 0;
9113 config->config_table[i].target_table_entry.
9114 vlan_id = 0;
9115
9116 DP(NETIF_MSG_IFUP,
9117 "setting MCAST[%d] (%04x:%04x:%04x)\n",
9118 i, config->config_table[i].cam_entry.msb_mac_addr,
9119 config->config_table[i].cam_entry.middle_mac_addr,
9120 config->config_table[i].cam_entry.lsb_mac_addr);
9121 }
9122 old = config->hdr.length_6b;
9123 if (old > i) {
9124 for (; i < old; i++) {
9125 if (CAM_IS_INVALID(config->config_table[i])) {
9126 i--; /* already invalidated */
9127 break;
9128 }
9129 /* invalidate */
9130 CAM_INVALIDATE(config->config_table[i]);
9131 }
9132 }
9133
9134 if (CHIP_REV_IS_SLOW(bp))
9135 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
9136 else
9137 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
9138
9139 config->hdr.length_6b = i;
9140 config->hdr.offset = offset;
9141 config->hdr.reserved0 = 0;
9142 config->hdr.reserved1 = 0;
9143
9144 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9145 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9146 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
9147 }
9148
9149 bp->rx_mode = rx_mode;
9150 bnx2x_set_storm_rx_mode(bp);
9151}
9152
9153static int bnx2x_poll(struct napi_struct *napi, int budget)
9154{
9155 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9156 napi);
9157 struct bnx2x *bp = fp->bp;
9158 int work_done = 0;
9159
9160#ifdef BNX2X_STOP_ON_ERROR
9161 if (unlikely(bp->panic))
9162 goto out_panic;
9163#endif
9164
9165 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9166 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9167 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9168
9169 bnx2x_update_fpsb_idx(fp);
9170
9171 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
9172 bnx2x_tx_int(fp, budget);
9173
9174
9175 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9176 work_done = bnx2x_rx_int(fp, budget);
9177
9178
9179 rmb(); /* bnx2x_has_work() reads the status block */
9180
9181 /* must not complete if we consumed full budget */
9182 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9183
9184#ifdef BNX2X_STOP_ON_ERROR
9185out_panic:
9186#endif
9187 netif_rx_complete(bp->dev, napi);
9188
9189 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
9190 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9191 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
9192 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9193 }
9194
9195 return work_done;
9196}
9197
9198/* Called with netif_tx_lock.
9199 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9200 * netif_wake_queue().
9201 */
9202static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9203{
9204 struct bnx2x *bp = netdev_priv(dev);
9205 struct bnx2x_fastpath *fp;
9206 struct sw_tx_bd *tx_buf;
9207 struct eth_tx_bd *tx_bd;
9208 struct eth_tx_parse_bd *pbd = NULL;
9209 u16 pkt_prod, bd_prod;
9210 int nbd, fp_index = 0;
9211 dma_addr_t mapping;
9212
9213#ifdef BNX2X_STOP_ON_ERROR
9214 if (unlikely(bp->panic))
9215 return NETDEV_TX_BUSY;
9216#endif
9217
9218 fp_index = smp_processor_id() % (bp->num_queues);
9219
9220 fp = &bp->fp[fp_index];
9221 	if (unlikely(bnx2x_tx_avail(fp) <
9222 (skb_shinfo(skb)->nr_frags + 3))) {
9223 		bp->slowpath->eth_stats.driver_xoff++;
9224 netif_stop_queue(dev);
9225 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9226 return NETDEV_TX_BUSY;
9227 }
9228
9229 /*
9230 This is a bit ugly. First we use one BD which we mark as start,
9231 then for TSO or xsum we have a parsing info BD,
9232 and only then we have the rest of the TSO bds.
9233 (don't forget to mark the last one as last,
9234 and to unmap only AFTER you write to the BD ...)
9235 I would like to thank DovH for this mess.
9236 */
9237
9238 pkt_prod = fp->tx_pkt_prod++;
9239 bd_prod = fp->tx_bd_prod;
9240 bd_prod = TX_BD(bd_prod);
9241
9242 /* get a tx_buff and first bd */
9243 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9244 tx_bd = &fp->tx_desc_ring[bd_prod];
9245
9246 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9247 tx_bd->general_data = (UNICAST_ADDRESS <<
9248 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9249 tx_bd->general_data |= 1; /* header nbd */
9250
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009251 /* remember the first bd of the packet */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009252 tx_buf->first_bd = bd_prod;
9253
9254 DP(NETIF_MSG_TX_QUEUED,
9255 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9256 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9257
9258 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9259 struct iphdr *iph = ip_hdr(skb);
9260 u8 len;
9261
9262 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9263
9264 /* turn on parsing and get a bd */
9265 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9266 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9267 len = ((u8 *)iph - (u8 *)skb->data) / 2;
9268
9269 /* for now NS flag is not used in Linux */
9270 pbd->global_data = (len |
Eliezer Tamir96fc1782008-02-28 11:57:55 -08009271 		((skb->protocol == htons(ETH_P_8021Q)) <<
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009272 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9273 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9274 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9275 if (iph->protocol == IPPROTO_TCP) {
9276 struct tcphdr *th = tcp_hdr(skb);
9277
9278 tx_bd->bd_flags.as_bitfield |=
9279 ETH_TX_BD_FLAGS_TCP_CSUM;
Eliezer Tamir96fc1782008-02-28 11:57:55 -08009280 pbd->tcp_flags = pbd_tcp_flags(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009281 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9282 pbd->tcp_pseudo_csum = swab16(th->check);
9283
9284 } else if (iph->protocol == IPPROTO_UDP) {
9285 struct udphdr *uh = udp_hdr(skb);
9286
9287 tx_bd->bd_flags.as_bitfield |=
9288 ETH_TX_BD_FLAGS_TCP_CSUM;
9289 pbd->total_hlen += cpu_to_le16(4);
9290 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9291 pbd->cs_offset = 5; /* 10 >> 1 */
9292 pbd->tcp_pseudo_csum = 0;
9293 			/* HW bug: subtract the csum of the 10 bytes
9294 			 * preceding the UDP header from uh->check
9295 */
9296 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9297 csum_partial(((u8 *)(uh)-10), 10, 0)));
9298 }
9299 }
9300
9301 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9302 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9303 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9304 } else {
9305 tx_bd->vlan = cpu_to_le16(pkt_prod);
9306 }
9307
9308 mapping = pci_map_single(bp->pdev, skb->data,
9309 skb->len, PCI_DMA_TODEVICE);
9310
9311 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9312 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9313 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9314 tx_bd->nbd = cpu_to_le16(nbd);
9315 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9316
9317 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9318 " nbytes %d flags %x vlan %u\n",
9319 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9320 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9321
9322 if (skb_shinfo(skb)->gso_size &&
9323 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
Eliezer Tamir96fc1782008-02-28 11:57:55 -08009324 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009325
9326 DP(NETIF_MSG_TX_QUEUED,
9327 "TSO packet len %d hlen %d total len %d tso size %d\n",
9328 skb->len, hlen, skb_headlen(skb),
9329 skb_shinfo(skb)->gso_size);
9330
9331 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9332
9333 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9334 /* we split the first bd into headers and data bds
9335 			 * to ease the pain of our fellow microcode engineers
9336 * we use one mapping for both bds
9337 * So far this has only been observed to happen
9338 * in Other Operating Systems(TM)
9339 */
9340
9341 /* first fix first bd */
9342 nbd++;
9343 tx_bd->nbd = cpu_to_le16(nbd);
9344 tx_bd->nbytes = cpu_to_le16(hlen);
9345
9346 /* we only print this as an error
9347 * because we don't think this will ever happen.
9348 */
9349 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9350 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9351 tx_bd->addr_lo, tx_bd->nbd);
9352
9353 /* now get a new data bd
9354 * (after the pbd) and fill it */
9355 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9356 tx_bd = &fp->tx_desc_ring[bd_prod];
9357
9358 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9359 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9360 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9361 tx_bd->vlan = cpu_to_le16(pkt_prod);
9362 /* this marks the bd
9363 * as one that has no individual mapping
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009364 * the FW ignores this flag in a bd not marked start
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009365 */
9366 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9367 DP(NETIF_MSG_TX_QUEUED,
9368 "TSO split data size is %d (%x:%x)\n",
9369 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9370 }
9371
9372 if (!pbd) {
9373 /* supposed to be unreached
9374 * (and therefore not handled properly...)
9375 */
9376 BNX2X_ERR("LSO with no PBD\n");
9377 BUG();
9378 }
9379
9380 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9381 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9382 pbd->ip_id = swab16(ip_hdr(skb)->id);
9383 pbd->tcp_pseudo_csum =
9384 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9385 ip_hdr(skb)->daddr,
9386 0, IPPROTO_TCP, 0));
9387 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9388 }
9389
9390 {
9391 int i;
9392
9393 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9394 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9395
9396 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9397 tx_bd = &fp->tx_desc_ring[bd_prod];
9398
9399 mapping = pci_map_page(bp->pdev, frag->page,
9400 frag->page_offset,
9401 frag->size, PCI_DMA_TODEVICE);
9402
9403 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9404 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9405 tx_bd->nbytes = cpu_to_le16(frag->size);
9406 tx_bd->vlan = cpu_to_le16(pkt_prod);
9407 tx_bd->bd_flags.as_bitfield = 0;
9408 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9409 " addr (%x:%x) nbytes %d flags %x\n",
9410 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9411 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9412 } /* for */
9413 }
9414
9415 /* now at last mark the bd as the last bd */
9416 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9417
9418 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9419 tx_bd, tx_bd->bd_flags.as_bitfield);
9420
9421 tx_buf->skb = skb;
9422
9423 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9424
9425 /* now send a tx doorbell, counting the next bd
9426 * if the packet contains or ends with it
9427 */
9428 if (TX_BD_POFF(bd_prod) < nbd)
9429 nbd++;
9430
9431 if (pbd)
9432 DP(NETIF_MSG_TX_QUEUED,
9433 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9434 " tcp_flags %x xsum %x seq %u hlen %u\n",
9435 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9436 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9437 pbd->tcp_send_seq, pbd->total_hlen);
9438
9439 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9440
Eliezer Tamir96fc1782008-02-28 11:57:55 -08009441 fp->hw_tx_prods->bds_prod =
9442 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009443 mb(); /* FW restriction: must not reorder writing nbd and packets */
Eliezer Tamir96fc1782008-02-28 11:57:55 -08009444 fp->hw_tx_prods->packets_prod =
9445 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009446 DOORBELL(bp, fp_index, 0);
9447
9448 mmiowb();
9449
9450 fp->tx_bd_prod = bd_prod;
9451 dev->trans_start = jiffies;
9452
9453 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9454 netif_stop_queue(dev);
9455 bp->slowpath->eth_stats.driver_xoff++;
9456 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9457 netif_wake_queue(dev);
9458 }
9459 fp->tx_pkt++;
9460
9461 return NETDEV_TX_OK;
9462}
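/* Editor's note (illustrative sketch, not part of the driver): the nbd value
 * accounted in bnx2x_start_xmit() above is one start BD, one optional parse
 * BD (checksum/TSO), one BD per page fragment, plus an extra data BD when the
 * TSO header split is taken (the doorbell path may add one more at a BD page
 * boundary).  The helper below is hypothetical and only restates that count.
 */
static inline int example_count_tx_bds(int nr_frags, bool has_parse_bd,
				       bool tso_header_split)
{
	int nbd = 1 + (has_parse_bd ? 1 : 0) + nr_frags;

	if (tso_header_split)
		nbd++;	/* header and payload of the first BD are split */
	return nbd;
}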
9463
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009464/* Called with rtnl_lock */
9465static int bnx2x_open(struct net_device *dev)
9466{
9467 struct bnx2x *bp = netdev_priv(dev);
9468
9469 bnx2x_set_power_state(bp, PCI_D0);
9470
9471 return bnx2x_nic_load(bp, 1);
9472}
9473
9474/* Called with rtnl_lock */
9475static int bnx2x_close(struct net_device *dev)
9476{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009477 struct bnx2x *bp = netdev_priv(dev);
9478
9479 /* Unload the driver, release IRQs */
Eliezer Tamir228241e2008-02-28 11:56:57 -08009480 bnx2x_nic_unload(bp, 1);
9481
9482 if (!CHIP_REV_IS_SLOW(bp))
9483 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009484
9485 return 0;
9486}
9487
9488/* Called with rtnl_lock */
9489static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9490{
9491 struct sockaddr *addr = p;
9492 struct bnx2x *bp = netdev_priv(dev);
9493
9494 if (!is_valid_ether_addr(addr->sa_data))
9495 return -EINVAL;
9496
9497 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9498 if (netif_running(dev))
9499 bnx2x_set_mac_addr(bp);
9500
9501 return 0;
9502}
9503
9504/* Called with rtnl_lock */
9505static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9506{
9507 struct mii_ioctl_data *data = if_mii(ifr);
9508 struct bnx2x *bp = netdev_priv(dev);
9509 int err;
9510
9511 switch (cmd) {
9512 case SIOCGMIIPHY:
9513 data->phy_id = bp->phy_addr;
9514
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009515 /* fallthrough */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009516 case SIOCGMIIREG: {
9517 u32 mii_regval;
9518
9519 spin_lock_bh(&bp->phy_lock);
9520 if (bp->state == BNX2X_STATE_OPEN) {
9521 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9522 &mii_regval);
9523
9524 data->val_out = mii_regval;
9525 } else {
9526 err = -EAGAIN;
9527 }
9528 spin_unlock_bh(&bp->phy_lock);
9529 return err;
9530 }
9531
9532 case SIOCSMIIREG:
9533 if (!capable(CAP_NET_ADMIN))
9534 return -EPERM;
9535
9536 spin_lock_bh(&bp->phy_lock);
9537 if (bp->state == BNX2X_STATE_OPEN) {
9538 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9539 data->val_in);
9540 } else {
9541 err = -EAGAIN;
9542 }
9543 spin_unlock_bh(&bp->phy_lock);
9544 return err;
9545
9546 default:
9547 /* do nothing */
9548 break;
9549 }
9550
9551 return -EOPNOTSUPP;
9552}
9553
9554/* Called with rtnl_lock */
9555static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9556{
9557 struct bnx2x *bp = netdev_priv(dev);
9558
9559 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9560 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9561 return -EINVAL;
9562
9563 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009564 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009565 * only updated as part of load
9566 */
9567 dev->mtu = new_mtu;
9568
9569 if (netif_running(dev)) {
9570 bnx2x_nic_unload(bp, 0);
9571 bnx2x_nic_load(bp, 0);
9572 }
9573 return 0;
9574}
9575
9576static void bnx2x_tx_timeout(struct net_device *dev)
9577{
9578 struct bnx2x *bp = netdev_priv(dev);
9579
9580#ifdef BNX2X_STOP_ON_ERROR
9581 if (!bp->panic)
9582 bnx2x_panic();
9583#endif
9584 /* This allows the netif to be shutdown gracefully before resetting */
9585 schedule_work(&bp->reset_task);
9586}
9587
9588#ifdef BCM_VLAN
9589/* Called with rtnl_lock */
9590static void bnx2x_vlan_rx_register(struct net_device *dev,
9591 struct vlan_group *vlgrp)
9592{
9593 struct bnx2x *bp = netdev_priv(dev);
9594
9595 bp->vlgrp = vlgrp;
9596 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -08009597 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009598}
9599#endif
9600
9601#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9602static void poll_bnx2x(struct net_device *dev)
9603{
9604 struct bnx2x *bp = netdev_priv(dev);
9605
9606 disable_irq(bp->pdev->irq);
9607 bnx2x_interrupt(bp->pdev->irq, dev);
9608 enable_irq(bp->pdev->irq);
9609}
9610#endif
9611
9612static void bnx2x_reset_task(struct work_struct *work)
9613{
9614 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9615
9616#ifdef BNX2X_STOP_ON_ERROR
9617 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
9618 " so reset not done to allow debug dump,\n"
9619 KERN_ERR " you will need to reboot when done\n");
9620 return;
9621#endif
9622
9623 if (!netif_running(bp->dev))
9624 return;
9625
Eliezer Tamir228241e2008-02-28 11:56:57 -08009626 rtnl_lock();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009627
Eliezer Tamir228241e2008-02-28 11:56:57 -08009628 if (bp->state != BNX2X_STATE_OPEN) {
9629 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
9630 goto reset_task_exit;
9631 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009632
9633 bnx2x_nic_unload(bp, 0);
9634 bnx2x_nic_load(bp, 0);
9635
Eliezer Tamir228241e2008-02-28 11:56:57 -08009636reset_task_exit:
9637 rtnl_unlock();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009638}
9639
9640static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9641 struct net_device *dev)
9642{
9643 struct bnx2x *bp;
9644 int rc;
9645
9646 SET_NETDEV_DEV(dev, &pdev->dev);
9647 bp = netdev_priv(dev);
9648
9649 bp->flags = 0;
9650 bp->port = PCI_FUNC(pdev->devfn);
9651
9652 rc = pci_enable_device(pdev);
9653 if (rc) {
9654 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9655 goto err_out;
9656 }
9657
9658 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9659 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9660 " aborting\n");
9661 rc = -ENODEV;
9662 goto err_out_disable;
9663 }
9664
9665 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9666 printk(KERN_ERR PFX "Cannot find second PCI device"
9667 " base address, aborting\n");
9668 rc = -ENODEV;
9669 goto err_out_disable;
9670 }
9671
9672 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9673 if (rc) {
9674 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9675 " aborting\n");
9676 goto err_out_disable;
9677 }
9678
9679 pci_set_master(pdev);
9680
9681 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9682 if (bp->pm_cap == 0) {
9683 printk(KERN_ERR PFX "Cannot find power management"
9684 " capability, aborting\n");
9685 rc = -EIO;
9686 goto err_out_release;
9687 }
9688
9689 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9690 if (bp->pcie_cap == 0) {
9691 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9692 " aborting\n");
9693 rc = -EIO;
9694 goto err_out_release;
9695 }
9696
9697 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9698 bp->flags |= USING_DAC_FLAG;
9699 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9700 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9701 " failed, aborting\n");
9702 rc = -EIO;
9703 goto err_out_release;
9704 }
9705
9706 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9707 printk(KERN_ERR PFX "System does not support DMA,"
9708 " aborting\n");
9709 rc = -EIO;
9710 goto err_out_release;
9711 }
9712
9713 bp->dev = dev;
9714 bp->pdev = pdev;
9715
9716 spin_lock_init(&bp->phy_lock);
9717
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009718 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9719 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9720
Jeff Garzikcba05162007-11-23 21:50:34 -05009721 dev->base_addr = pci_resource_start(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009722
9723 dev->irq = pdev->irq;
9724
9725 bp->regview = ioremap_nocache(dev->base_addr,
9726 pci_resource_len(pdev, 0));
9727 if (!bp->regview) {
9728 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9729 rc = -ENOMEM;
9730 goto err_out_release;
9731 }
9732
9733 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
9734 pci_resource_len(pdev, 2));
9735 if (!bp->doorbells) {
9736 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9737 rc = -ENOMEM;
9738 goto err_out_unmap;
9739 }
9740
9741 bnx2x_set_power_state(bp, PCI_D0);
9742
9743 bnx2x_get_hwinfo(bp);
9744
9745 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009746 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009747 " will only init first device\n");
9748 onefunc = 1;
9749 nomcp = 1;
9750 }
9751
9752 if (nomcp) {
9753 printk(KERN_ERR PFX "MCP disabled, will only"
9754 " init first device\n");
9755 onefunc = 1;
9756 }
9757
9758 if (onefunc && bp->port) {
9759 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9760 rc = -ENODEV;
9761 goto err_out_unmap;
9762 }
9763
9764 bp->tx_ring_size = MAX_TX_AVAIL;
9765 bp->rx_ring_size = MAX_RX_AVAIL;
9766
9767 bp->rx_csum = 1;
9768
9769 bp->rx_offset = 0;
9770
9771 bp->tx_quick_cons_trip_int = 0xff;
9772 bp->tx_quick_cons_trip = 0xff;
9773 bp->tx_ticks_int = 50;
9774 bp->tx_ticks = 50;
9775
9776 bp->rx_quick_cons_trip_int = 0xff;
9777 bp->rx_quick_cons_trip = 0xff;
9778 bp->rx_ticks_int = 25;
9779 bp->rx_ticks = 25;
9780
9781 bp->stats_ticks = 1000000 & 0xffff00;
9782
9783 bp->timer_interval = HZ;
9784 bp->current_interval = (poll ? poll : HZ);
9785
9786 init_timer(&bp->timer);
9787 bp->timer.expires = jiffies + bp->current_interval;
9788 bp->timer.data = (unsigned long) bp;
9789 bp->timer.function = bnx2x_timer;
9790
9791 return 0;
9792
9793err_out_unmap:
9794 if (bp->regview) {
9795 iounmap(bp->regview);
9796 bp->regview = NULL;
9797 }
9798
9799 if (bp->doorbells) {
9800 iounmap(bp->doorbells);
9801 bp->doorbells = NULL;
9802 }
9803
9804err_out_release:
9805 pci_release_regions(pdev);
9806
9807err_out_disable:
9808 pci_disable_device(pdev);
9809 pci_set_drvdata(pdev, NULL);
9810
9811err_out:
9812 return rc;
9813}
9814
Eliezer Tamir25047952008-02-28 11:50:16 -08009815static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9816{
9817 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9818
9819 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9820 return val;
9821}
9822
9823/* return value of 1=2.5GHz 2=5GHz */
9824static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9825{
9826 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9827
9828 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9829 return val;
9830}
9831
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009832static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9833 const struct pci_device_id *ent)
9834{
9835 static int version_printed;
9836 struct net_device *dev = NULL;
9837 struct bnx2x *bp;
Eliezer Tamir25047952008-02-28 11:50:16 -08009838 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009839 int port = PCI_FUNC(pdev->devfn);
Eliezer Tamir25047952008-02-28 11:50:16 -08009840 DECLARE_MAC_BUF(mac);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009841
9842 if (version_printed++ == 0)
9843 printk(KERN_INFO "%s", version);
9844
9845 /* dev zeroed in init_etherdev */
9846 dev = alloc_etherdev(sizeof(*bp));
9847 if (!dev)
9848 return -ENOMEM;
9849
9850 netif_carrier_off(dev);
9851
9852 bp = netdev_priv(dev);
9853 bp->msglevel = debug;
9854
9855 if (port && onefunc) {
9856 		printk(KERN_ERR PFX "Second function disabled, exiting\n");
Eliezer Tamir25047952008-02-28 11:50:16 -08009857 free_netdev(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009858 return 0;
9859 }
9860
9861 rc = bnx2x_init_board(pdev, dev);
9862 if (rc < 0) {
9863 free_netdev(dev);
9864 return rc;
9865 }
9866
9867 dev->hard_start_xmit = bnx2x_start_xmit;
9868 dev->watchdog_timeo = TX_TIMEOUT;
9869
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009870 dev->ethtool_ops = &bnx2x_ethtool_ops;
9871 dev->open = bnx2x_open;
9872 dev->stop = bnx2x_close;
9873 dev->set_multicast_list = bnx2x_set_rx_mode;
9874 dev->set_mac_address = bnx2x_change_mac_addr;
9875 dev->do_ioctl = bnx2x_ioctl;
9876 dev->change_mtu = bnx2x_change_mtu;
9877 dev->tx_timeout = bnx2x_tx_timeout;
9878#ifdef BCM_VLAN
9879 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9880#endif
9881#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9882 dev->poll_controller = poll_bnx2x;
9883#endif
9884 dev->features |= NETIF_F_SG;
9885 if (bp->flags & USING_DAC_FLAG)
9886 dev->features |= NETIF_F_HIGHDMA;
9887 dev->features |= NETIF_F_IP_CSUM;
9888#ifdef BCM_VLAN
9889 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9890#endif
9891 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9892
9893 rc = register_netdev(dev);
9894 if (rc) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009895 dev_err(&pdev->dev, "Cannot register net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009896 if (bp->regview)
9897 iounmap(bp->regview);
9898 if (bp->doorbells)
9899 iounmap(bp->doorbells);
9900 pci_release_regions(pdev);
9901 pci_disable_device(pdev);
9902 pci_set_drvdata(pdev, NULL);
9903 free_netdev(dev);
9904 return rc;
9905 }
9906
9907 pci_set_drvdata(pdev, dev);
9908
9909 bp->name = board_info[ent->driver_data].name;
Eliezer Tamir25047952008-02-28 11:50:16 -08009910 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9911 " IRQ %d, ", dev->name, bp->name,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009912 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9913 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Eliezer Tamir25047952008-02-28 11:50:16 -08009914 bnx2x_get_pcie_width(bp),
9915 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9916 dev->base_addr, bp->pdev->irq);
9917 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009918 return 0;
9919}
9920
9921static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9922{
9923 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009924 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009925
Eliezer Tamir228241e2008-02-28 11:56:57 -08009926 if (!dev) {
9927 /* we get here if init_one() fails */
9928 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9929 return;
9930 }
9931
9932 bp = netdev_priv(dev);
9933
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009934 unregister_netdev(dev);
9935
9936 if (bp->regview)
9937 iounmap(bp->regview);
9938
9939 if (bp->doorbells)
9940 iounmap(bp->doorbells);
9941
9942 free_netdev(dev);
9943 pci_release_regions(pdev);
9944 pci_disable_device(pdev);
9945 pci_set_drvdata(pdev, NULL);
9946}
9947
9948static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9949{
9950 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009951 struct bnx2x *bp;
9952
9953 if (!dev)
9954 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009955
9956 if (!netif_running(dev))
9957 return 0;
9958
Eliezer Tamir228241e2008-02-28 11:56:57 -08009959 bp = netdev_priv(dev);
9960
9961 bnx2x_nic_unload(bp, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009962
9963 netif_device_detach(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009964
Eliezer Tamir228241e2008-02-28 11:56:57 -08009965 pci_save_state(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009966 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
Eliezer Tamir228241e2008-02-28 11:56:57 -08009967
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009968 return 0;
9969}
9970
9971static int bnx2x_resume(struct pci_dev *pdev)
9972{
9973 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009974 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009975 int rc;
9976
Eliezer Tamir228241e2008-02-28 11:56:57 -08009977 if (!dev) {
9978 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9979 return -ENODEV;
9980 }
9981
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009982 if (!netif_running(dev))
9983 return 0;
9984
Eliezer Tamir228241e2008-02-28 11:56:57 -08009985 bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009986
Eliezer Tamir228241e2008-02-28 11:56:57 -08009987 pci_restore_state(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009988 bnx2x_set_power_state(bp, PCI_D0);
9989 netif_device_attach(dev);
9990
9991 rc = bnx2x_nic_load(bp, 0);
9992 if (rc)
9993 return rc;
9994
9995 return 0;
9996}
9997
9998static struct pci_driver bnx2x_pci_driver = {
9999 .name = DRV_MODULE_NAME,
10000 .id_table = bnx2x_pci_tbl,
10001 .probe = bnx2x_init_one,
10002 .remove = __devexit_p(bnx2x_remove_one),
10003 .suspend = bnx2x_suspend,
10004 .resume = bnx2x_resume,
10005};
10006
10007static int __init bnx2x_init(void)
10008{
10009 return pci_register_driver(&bnx2x_pci_driver);
10010}
10011
10012static void __exit bnx2x_cleanup(void)
10013{
10014 pci_unregister_driver(&bnx2x_pci_driver);
10015}
10016
10017module_init(bnx2x_init);
10018module_exit(bnx2x_cleanup);
10019