blob: 8e96154be031a113896bb4ba80521f8e01409f6e [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#define BCM_TSO 1
Michael Chanf2a4f052006-03-23 01:13:12 -080046#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080049#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070050#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080051
Michael Chanb6016b72005-05-26 13:03:09 -070052#include "bnx2.h"
53#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080054#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070055
56#define DRV_MODULE_NAME "bnx2"
57#define PFX DRV_MODULE_NAME ": "
Michael Chanb659f442007-02-02 00:46:35 -080058#define DRV_MODULE_VERSION "1.5.5"
59#define DRV_MODULE_RELDATE "February 1, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070060
61#define RUN_AT(x) (jiffies + (x))
62
63/* Time in jiffies before concluding the transmitter is hung. */
64#define TX_TIMEOUT (5*HZ)
65
Randy Dunlape19360f2006-04-10 23:22:06 -070066static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070067 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68
69MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080070MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070071MODULE_LICENSE("GPL");
72MODULE_VERSION(DRV_MODULE_VERSION);
73
74static int disable_msi = 0;
75
76module_param(disable_msi, int, 0);
77MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
78
79typedef enum {
80 BCM5706 = 0,
81 NC370T,
82 NC370I,
83 BCM5706S,
84 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080085 BCM5708,
86 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080087 BCM5709,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
90/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050091static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070092 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080099 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chanb6016b72005-05-26 13:03:09 -0700102 };
103
104static struct pci_device_id bnx2_pci_tbl[] = {
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chanb6016b72005-05-26 13:03:09 -0700121 { 0, }
122};
123
124static struct flash_spec flash_table[] =
125{
126 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800127 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700128 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
129 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
130 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800131 /* Expansion entry 0001 */
132 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700133 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800134 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
135 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700136 /* Saifun SA25F010 (non-buffered flash) */
137 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800138 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700139 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
140 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
141 "Non-buffered flash (128kB)"},
142 /* Saifun SA25F020 (non-buffered flash) */
143 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800144 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700145 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
146 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
147 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800148 /* Expansion entry 0100 */
149 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
150 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
152 "Entry 0100"},
153 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400154 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800155 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
156 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
157 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
158 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
159 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
160 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
161 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
162 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
163 /* Saifun SA25F005 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
166 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
168 "Non-buffered flash (64kB)"},
169 /* Fast EEPROM */
170 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
171 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
172 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
173 "EEPROM - fast"},
174 /* Expansion entry 1001 */
175 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
176 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 "Entry 1001"},
179 /* Expansion entry 1010 */
180 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
181 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 "Entry 1010"},
184 /* ATMEL AT45DB011B (buffered flash) */
185 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
186 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
187 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
188 "Buffered flash (128kB)"},
189 /* Expansion entry 1100 */
190 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
191 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
193 "Entry 1100"},
194 /* Expansion entry 1101 */
195 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
196 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
197 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 "Entry 1101"},
199 /* Ateml Expansion entry 1110 */
200 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
201 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
202 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
203 "Entry 1110 (Atmel)"},
204 /* ATMEL AT45DB021B (buffered flash) */
205 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
206 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
207 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
208 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700209};
210
211MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
212
/* Return the number of free tx descriptors in the ring.
 *
 * The smp_mb() orders the reads of tx_prod/tx_cons against earlier
 * ring updates (presumably pairing with a barrier in the tx completion
 * path — confirm against the tx handler).
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* Indices are 16-bit; mask off wrap-around and collapse
		 * the one unusable index per 256-entry page.
		 */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
230
/* Indirect register read: latch the target address into the PCICFG
 * register window, then read the data through the window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
237
/* Indirect register write: latch the target address into the PCICFG
 * register window, then write the data through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
244
/* Write a 32-bit value into the chip's context memory at cid_addr+offset.
 *
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * interface and the WRITE_REQ bit is polled (up to 5 x 5us) until the
 * hardware consumes the request; a timeout is silently ignored.  Older
 * chips use the simpler CTX_DATA_ADR/CTX_DATA register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;	/* NOTE: shadows the 'val' parameter */
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;	/* write has been accepted */
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
267
/* Read a PHY register over the MDIO bus.
 *
 * If the chip is auto-polling the PHY, auto-poll is temporarily turned
 * off around the access (40us settle time each way) and restored
 * afterwards.  The command is polled up to 50 x 10us; on success the
 * 16-bit data is stored in *val and 0 is returned, on timeout *val is
 * zeroed and -EBUSY is returned.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we use the bus. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delay */

		udelay(40);
	}

	/* Compose and fire the MDIO read command for phy_addr/reg. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to pick up the data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Still busy after the full wait: report failure. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delay */

		udelay(40);
	}

	return ret;
}
324
/* Write a PHY register over the MDIO bus.
 *
 * Mirror of bnx2_read_phy(): auto-polling is suspended around the
 * access if enabled, the write command is polled up to 50 x 10us,
 * and -EBUSY is returned if the bus never goes idle.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we use the bus. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delay */

		udelay(40);
	}

	/* Compose and fire the MDIO write command (data in low 16 bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delay */

		udelay(40);
	}

	return ret;
}
373
/* Mask the device interrupt; the read-back of INT_ACK_CMD ensures the
 * write has reached the chip before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
381
/* Unmask the device interrupt.
 *
 * The first write acks events up to last_status_idx while keeping the
 * interrupt masked; the second repeats the ack with the mask bit clear,
 * unmasking the line.  The final HC_COMMAND write with COAL_NOW asks
 * the host coalescing block to act immediately (presumably so pending
 * events raise an interrupt right away — confirm against chip docs).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
394
/* Disable the device interrupt and wait for any in-flight handler to
 * finish.  intr_sem is raised first so the ISR path can see the device
 * is being quiesced; synchronize_irq() then drains a running handler.
 * Paired with bnx2_netif_start(), which decrements intr_sem.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
402
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue if the device is up.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
413
/* Undo bnx2_netif_stop(): when the last outstanding stop is released
 * (intr_sem drops to zero), restart the tx queue and polling and
 * re-enable interrupts.  Nested stop/start pairs are supported via the
 * intr_sem count.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
425
/* Release all DMA/ring memory: 5709 context pages, the combined
 * status+statistics block, the tx descriptor ring and buffer array,
 * and all rx descriptor rings and the rx buffer array.
 *
 * Safe to call on a partially allocated device (used as the error path
 * of bnx2_alloc_mem()): every pointer is checked and cleared.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		/* stats_blk lives inside the status_blk allocation, so it
		 * is only NULLed here, not freed separately.
		 */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
464
/* Allocate all ring and status memory for the device.
 *
 * Allocates the tx buffer array and descriptor ring, the rx buffer
 * array and rx_max_ring descriptor rings, one coherent block holding
 * both the status block and the statistics block, and (5709 only) the
 * context pages.  Returns 0 on success or -ENOMEM; on failure all
 * partial allocations are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx buffer bookkeeping can be large, so it is vmalloc'ed and
	 * zeroed by hand below.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The stats block starts at the cache-aligned offset within the
	 * same coherent allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context, split into BCM_PAGE_SIZE pages
		 * (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
535
536static void
Michael Chane3648b32005-11-04 08:51:21 -0800537bnx2_report_fw_link(struct bnx2 *bp)
538{
539 u32 fw_link_status = 0;
540
541 if (bp->link_up) {
542 u32 bmsr;
543
544 switch (bp->line_speed) {
545 case SPEED_10:
546 if (bp->duplex == DUPLEX_HALF)
547 fw_link_status = BNX2_LINK_STATUS_10HALF;
548 else
549 fw_link_status = BNX2_LINK_STATUS_10FULL;
550 break;
551 case SPEED_100:
552 if (bp->duplex == DUPLEX_HALF)
553 fw_link_status = BNX2_LINK_STATUS_100HALF;
554 else
555 fw_link_status = BNX2_LINK_STATUS_100FULL;
556 break;
557 case SPEED_1000:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_1000HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_1000FULL;
562 break;
563 case SPEED_2500:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_2500HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_2500FULL;
568 break;
569 }
570
571 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
572
573 if (bp->autoneg) {
574 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
575
576 bnx2_read_phy(bp, MII_BMSR, &bmsr);
577 bnx2_read_phy(bp, MII_BMSR, &bmsr);
578
579 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
580 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
581 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
582 else
583 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
584 }
585 }
586 else
587 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
588
589 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
590}
591
592static void
Michael Chanb6016b72005-05-26 13:03:09 -0700593bnx2_report_link(struct bnx2 *bp)
594{
595 if (bp->link_up) {
596 netif_carrier_on(bp->dev);
597 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
598
599 printk("%d Mbps ", bp->line_speed);
600
601 if (bp->duplex == DUPLEX_FULL)
602 printk("full duplex");
603 else
604 printk("half duplex");
605
606 if (bp->flow_ctrl) {
607 if (bp->flow_ctrl & FLOW_CTRL_RX) {
608 printk(", receive ");
609 if (bp->flow_ctrl & FLOW_CTRL_TX)
610 printk("& transmit ");
611 }
612 else {
613 printk(", transmit ");
614 }
615 printk("flow control ON");
616 }
617 printk("\n");
618 }
619 else {
620 netif_carrier_off(bp->dev);
621 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
622 }
Michael Chane3648b32005-11-04 08:51:21 -0800623
624 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700625}
626
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 *
 * If speed/flow-control autoneg is not fully enabled, the requested
 * flow control is applied directly (full duplex only).  Otherwise the
 * local and partner advertisements are combined per IEEE 802.3ab
 * Table 28B-3.  The 5708 SerDes reports resolved pause directly in
 * its 1000X_STAT1 register, bypassing the table.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful in full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes: resolved pause bits come straight from
		 * the PHY status register.
		 */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Translate the 1000BASE-X pause bits into the common
		 * copper-style PAUSE_CAP/PAUSE_ASYM encoding so the
		 * resolution logic below works for both media.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
702
703static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800704bnx2_5708s_linkup(struct bnx2 *bp)
705{
706 u32 val;
707
708 bp->link_up = 1;
709 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
710 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
711 case BCM5708S_1000X_STAT1_SPEED_10:
712 bp->line_speed = SPEED_10;
713 break;
714 case BCM5708S_1000X_STAT1_SPEED_100:
715 bp->line_speed = SPEED_100;
716 break;
717 case BCM5708S_1000X_STAT1_SPEED_1G:
718 bp->line_speed = SPEED_1000;
719 break;
720 case BCM5708S_1000X_STAT1_SPEED_2G5:
721 bp->line_speed = SPEED_2500;
722 break;
723 }
724 if (val & BCM5708S_1000X_STAT1_FD)
725 bp->duplex = DUPLEX_FULL;
726 else
727 bp->duplex = DUPLEX_HALF;
728
729 return 0;
730}
731
732static int
733bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700734{
735 u32 bmcr, local_adv, remote_adv, common;
736
737 bp->link_up = 1;
738 bp->line_speed = SPEED_1000;
739
740 bnx2_read_phy(bp, MII_BMCR, &bmcr);
741 if (bmcr & BMCR_FULLDPLX) {
742 bp->duplex = DUPLEX_FULL;
743 }
744 else {
745 bp->duplex = DUPLEX_HALF;
746 }
747
748 if (!(bmcr & BMCR_ANENABLE)) {
749 return 0;
750 }
751
752 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
753 bnx2_read_phy(bp, MII_LPA, &remote_adv);
754
755 common = local_adv & remote_adv;
756 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
757
758 if (common & ADVERTISE_1000XFULL) {
759 bp->duplex = DUPLEX_FULL;
760 }
761 else {
762 bp->duplex = DUPLEX_HALF;
763 }
764 }
765
766 return 0;
767}
768
769static int
770bnx2_copper_linkup(struct bnx2 *bp)
771{
772 u32 bmcr;
773
774 bnx2_read_phy(bp, MII_BMCR, &bmcr);
775 if (bmcr & BMCR_ANENABLE) {
776 u32 local_adv, remote_adv, common;
777
778 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
779 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
780
781 common = local_adv & (remote_adv >> 2);
782 if (common & ADVERTISE_1000FULL) {
783 bp->line_speed = SPEED_1000;
784 bp->duplex = DUPLEX_FULL;
785 }
786 else if (common & ADVERTISE_1000HALF) {
787 bp->line_speed = SPEED_1000;
788 bp->duplex = DUPLEX_HALF;
789 }
790 else {
791 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
792 bnx2_read_phy(bp, MII_LPA, &remote_adv);
793
794 common = local_adv & remote_adv;
795 if (common & ADVERTISE_100FULL) {
796 bp->line_speed = SPEED_100;
797 bp->duplex = DUPLEX_FULL;
798 }
799 else if (common & ADVERTISE_100HALF) {
800 bp->line_speed = SPEED_100;
801 bp->duplex = DUPLEX_HALF;
802 }
803 else if (common & ADVERTISE_10FULL) {
804 bp->line_speed = SPEED_10;
805 bp->duplex = DUPLEX_FULL;
806 }
807 else if (common & ADVERTISE_10HALF) {
808 bp->line_speed = SPEED_10;
809 bp->duplex = DUPLEX_HALF;
810 }
811 else {
812 bp->line_speed = 0;
813 bp->link_up = 0;
814 }
815 }
816 }
817 else {
818 if (bmcr & BMCR_SPEED100) {
819 bp->line_speed = SPEED_100;
820 }
821 else {
822 bp->line_speed = SPEED_10;
823 }
824 if (bmcr & BMCR_FULLDPLX) {
825 bp->duplex = DUPLEX_FULL;
826 }
827 else {
828 bp->duplex = DUPLEX_HALF;
829 }
830 }
831
832 return 0;
833}
834
/* Program the EMAC for the currently resolved link parameters.
 *
 * Sets the inter-packet gap, the port mode (MII/GMII/25G) for the
 * negotiated speed, half/full duplex, and rx/tx pause enables, then
 * acks the link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x2620 is the normal TX_LENGTHS value; 0x26ff is used for
	 * 1000 Mbps half duplex (meaning of the fields not visible
	 * here — see the chip documentation).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				/* 5708+ have a dedicated 10M MII mode;
				 * on 5706, 10M falls through to MII.
				 */
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
901
/* Re-evaluate the link state and reprogram the MAC accordingly.
 *
 * Reads BMSR (twice, since link-down is latched), dispatches to the
 * per-PHY linkup helper, resolves flow control, reports transitions,
 * and finally updates the EMAC via bnx2_set_mac_link().  In loopback
 * modes the link is simply forced up.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;	/* remember state to detect a transition */

	/* BMSR latches link-down; read twice for the current value. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: trust the EMAC link status over BMSR. */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link lost while autoneg is requested: drop any
			 * forced 2.5G setting and make sure autoneg is
			 * (re)enabled in BMCR.
			 */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
968
/* Soft-reset the PHY via BMCR and wait for the self-clearing RESET bit.
 *
 * Polls up to PHY_RESET_MAX_WAIT x 10us; returns 0 on completion or
 * -EBUSY if the PHY never comes out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			/* extra settle time after reset completes */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
992
993static u32
994bnx2_phy_get_pause_adv(struct bnx2 *bp)
995{
996 u32 adv = 0;
997
998 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
999 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1000
1001 if (bp->phy_flags & PHY_SERDES_FLAG) {
1002 adv = ADVERTISE_1000XPAUSE;
1003 }
1004 else {
1005 adv = ADVERTISE_PAUSE_CAP;
1006 }
1007 }
1008 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1009 if (bp->phy_flags & PHY_SERDES_FLAG) {
1010 adv = ADVERTISE_1000XPSE_ASYM;
1011 }
1012 else {
1013 adv = ADVERTISE_PAUSE_ASYM;
1014 }
1015 }
1016 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1017 if (bp->phy_flags & PHY_SERDES_FLAG) {
1018 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1019 }
1020 else {
1021 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1022 }
1023 }
1024 return adv;
1025}
1026
/* Program the SerDes PHY according to bp->autoneg / bp->req_* settings.
 *
 * Caller must hold bp->phy_lock (this function drops and re-acquires it
 * around the msleep() below).
 *
 * Forced-speed path: builds new ADVERTISE/BMCR values, handles the
 * 5708-only 2.5G force bit, and if anything changed, bounces the link
 * so the partner sees it drop.  Autoneg path: programs the wanted
 * advertisement and restarts autoneg if it differs from what the PHY
 * currently advertises.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex requested. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Enable the 2.5G mode bit; if it was off we must
			 * bounce the link for the change to take effect.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: clear the mode bit on 5708. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced BMCR value.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path from here on. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock must be dropped across the sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1130
/* ethtool advertisement masks: every speed the driver supports on
 * fibre vs. copper media.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII register masks covering all 10/100 and all 1000 advertisement
 * bits, used to clear/rebuild the advertisement registers.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1143
/* Program the copper PHY according to bp->autoneg / bp->req_* settings.
 *
 * Caller must hold bp->phy_lock (dropped and re-acquired around the
 * msleep() below).
 *
 * Autoneg path: rebuilds the 10/100 and 1000 advertisement registers
 * and restarts autoneg only if something changed.  Forced path: writes
 * the forced BMCR, bouncing the link first if it is currently up.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked down to the bits we
		 * manage, for comparison with the wanted values.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice: the link status bit is latched, so
		 * the second read reflects the current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock must be dropped across the sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1237
1238static int
1239bnx2_setup_phy(struct bnx2 *bp)
1240{
1241 if (bp->loopback == MAC_LOOPBACK)
1242 return 0;
1243
1244 if (bp->phy_flags & PHY_SERDES_FLAG) {
1245 return (bnx2_setup_serdes_phy(bp));
1246 }
1247 else {
1248 return (bnx2_setup_copper_phy(bp));
1249 }
1250}
1251
1252static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001253bnx2_init_5708s_phy(struct bnx2 *bp)
1254{
1255 u32 val;
1256
1257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1258 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1259 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1260
1261 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1262 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1263 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1264
1265 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1266 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1267 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1268
1269 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1270 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1271 val |= BCM5708S_UP1_2G5;
1272 bnx2_write_phy(bp, BCM5708S_UP1, val);
1273 }
1274
1275 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001276 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1277 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001278 /* increase tx signal amplitude */
1279 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1280 BCM5708S_BLK_ADDR_TX_MISC);
1281 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1282 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1283 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1284 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1285 }
1286
Michael Chane3648b32005-11-04 08:51:21 -08001287 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001288 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1289
1290 if (val) {
1291 u32 is_backplane;
1292
Michael Chane3648b32005-11-04 08:51:21 -08001293 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001294 BNX2_SHARED_HW_CFG_CONFIG);
1295 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1296 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1297 BCM5708S_BLK_ADDR_TX_MISC);
1298 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1299 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1300 BCM5708S_BLK_ADDR_DIG);
1301 }
1302 }
1303 return 0;
1304}
1305
1306static int
1307bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001308{
1309 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1310
Michael Chan59b47d82006-11-19 14:10:45 -08001311 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1312 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001313
1314 if (bp->dev->mtu > 1500) {
1315 u32 val;
1316
1317 /* Set extended packet length bit */
1318 bnx2_write_phy(bp, 0x18, 0x7);
1319 bnx2_read_phy(bp, 0x18, &val);
1320 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1321
1322 bnx2_write_phy(bp, 0x1c, 0x6c00);
1323 bnx2_read_phy(bp, 0x1c, &val);
1324 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1325 }
1326 else {
1327 u32 val;
1328
1329 bnx2_write_phy(bp, 0x18, 0x7);
1330 bnx2_read_phy(bp, 0x18, &val);
1331 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1332
1333 bnx2_write_phy(bp, 0x1c, 0x6c00);
1334 bnx2_read_phy(bp, 0x1c, &val);
1335 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1336 }
1337
1338 return 0;
1339}
1340
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	/* One-time init for the copper PHY: optional CRC workaround and
	 * early-DAC disable, extended-packet-length setup for jumbo MTU,
	 * and ethernet@wirespeed enable.  Always returns 0.
	 * The 0x18/0x17/0x15/0x10/0x1c registers are vendor shadow
	 * registers; values come from Broadcom's programming sequences.
	 */
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-prescribed DSP write sequence for the CRC fix. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 of DSP expand register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1389
1390
1391static int
1392bnx2_init_phy(struct bnx2 *bp)
1393{
1394 u32 val;
1395 int rc = 0;
1396
1397 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1398 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1399
1400 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1401
1402 bnx2_reset_phy(bp);
1403
1404 bnx2_read_phy(bp, MII_PHYSID1, &val);
1405 bp->phy_id = val << 16;
1406 bnx2_read_phy(bp, MII_PHYSID2, &val);
1407 bp->phy_id |= val & 0xffff;
1408
1409 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001410 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1411 rc = bnx2_init_5706s_phy(bp);
1412 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1413 rc = bnx2_init_5708s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001414 }
1415 else {
1416 rc = bnx2_init_copper_phy(bp);
1417 }
1418
1419 bnx2_setup_phy(bp);
1420
1421 return rc;
1422}
1423
1424static int
1425bnx2_set_mac_loopback(struct bnx2 *bp)
1426{
1427 u32 mac_mode;
1428
1429 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1430 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1431 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1432 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1433 bp->link_up = 1;
1434 return 0;
1435}
1436
Michael Chanbc5a0692006-01-23 16:13:22 -08001437static int bnx2_test_link(struct bnx2 *);
1438
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	/* Put the PHY into loopback at 1000/full, wait up to ~1 s for
	 * the link to come up, then configure the MAC for GMII with all
	 * loopback/force bits cleared.  Returns the PHY write status
	 * if it fails, otherwise 0.
	 */
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; bnx2_test_link() returns 0 when up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1468
Michael Chanb6016b72005-05-26 13:03:09 -07001469static int
Michael Chanb090ae22006-01-23 16:07:10 -08001470bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001471{
1472 int i;
1473 u32 val;
1474
Michael Chanb6016b72005-05-26 13:03:09 -07001475 bp->fw_wr_seq++;
1476 msg_data |= bp->fw_wr_seq;
1477
Michael Chane3648b32005-11-04 08:51:21 -08001478 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001479
1480 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001481 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1482 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001483
Michael Chane3648b32005-11-04 08:51:21 -08001484 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001485
1486 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1487 break;
1488 }
Michael Chanb090ae22006-01-23 16:07:10 -08001489 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1490 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001491
1492 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001493 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1494 if (!silent)
1495 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1496 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001497
1498 msg_data &= ~BNX2_DRV_MSG_CODE;
1499 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1500
Michael Chane3648b32005-11-04 08:51:21 -08001501 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001502
Michael Chanb6016b72005-05-26 13:03:09 -07001503 return -EBUSY;
1504 }
1505
Michael Chanb090ae22006-01-23 16:07:10 -08001506 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1507 return -EIO;
1508
Michael Chanb6016b72005-05-26 13:03:09 -07001509 return 0;
1510}
1511
Michael Chan59b47d82006-11-19 14:10:45 -08001512static int
1513bnx2_init_5709_context(struct bnx2 *bp)
1514{
1515 int i, ret = 0;
1516 u32 val;
1517
1518 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1519 val |= (BCM_PAGE_BITS - 8) << 16;
1520 REG_WR(bp, BNX2_CTX_COMMAND, val);
1521 for (i = 0; i < bp->ctx_pages; i++) {
1522 int j;
1523
1524 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1525 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1526 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1527 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1528 (u64) bp->ctx_blk_mapping[i] >> 32);
1529 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1530 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1531 for (j = 0; j < 10; j++) {
1532
1533 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1534 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1535 break;
1536 udelay(5);
1537 }
1538 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1539 ret = -EBUSY;
1540 break;
1541 }
1542 }
1543 return ret;
1544}
1545
Michael Chanb6016b72005-05-26 13:03:09 -07001546static void
1547bnx2_init_context(struct bnx2 *bp)
1548{
1549 u32 vcid;
1550
1551 vcid = 96;
1552 while (vcid) {
1553 u32 vcid_addr, pcid_addr, offset;
1554
1555 vcid--;
1556
1557 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1558 u32 new_vcid;
1559
1560 vcid_addr = GET_PCID_ADDR(vcid);
1561 if (vcid & 0x8) {
1562 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1563 }
1564 else {
1565 new_vcid = vcid;
1566 }
1567 pcid_addr = GET_PCID_ADDR(new_vcid);
1568 }
1569 else {
1570 vcid_addr = GET_CID_ADDR(vcid);
1571 pcid_addr = vcid_addr;
1572 }
1573
1574 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1575 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1576
1577 /* Zero out the context. */
1578 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1579 CTX_WR(bp, 0x00, offset, 0);
1580 }
1581
1582 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1583 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1584 }
1585}
1586
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	/* Workaround for bad RX buffer memory blocks: drain the chip's
	 * mbuf allocator, remembering the good buffers (bit 9 clear),
	 * then free only the good ones back — permanently stranding the
	 * bad blocks inside the allocator.  Returns 0 on success or
	 * -ENOMEM if the temporary array cannot be allocated.
	 */
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Hardware can hand out at most 512 buffers. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Pack the buffer handle into the free-command format. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1637
1638static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001639bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001640{
1641 u32 val;
1642 u8 *mac_addr = bp->dev->dev_addr;
1643
1644 val = (mac_addr[0] << 8) | mac_addr[1];
1645
1646 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1647
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001648 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001649 (mac_addr[4] << 8) | mac_addr[5];
1650
1651 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1652}
1653
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	/* Allocate and DMA-map a fresh receive skb for ring slot @index,
	 * fill in the hardware rx_bd with its bus address, and advance
	 * the producer byte-sequence counter.  Returns 0 or -ENOMEM.
	 */
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1684
1685static void
1686bnx2_phy_int(struct bnx2 *bp)
1687{
1688 u32 new_link_state, old_link_state;
1689
1690 new_link_state = bp->status_blk->status_attn_bits &
1691 STATUS_ATTN_BITS_LINK_STATE;
1692 old_link_state = bp->status_blk->status_attn_bits_ack &
1693 STATUS_ATTN_BITS_LINK_STATE;
1694 if (new_link_state != old_link_state) {
1695 if (new_link_state) {
1696 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1697 STATUS_ATTN_BITS_LINK_STATE);
1698 }
1699 else {
1700 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1701 STATUS_ATTN_BITS_LINK_STATE);
1702 }
1703 bnx2_set_link(bp);
1704 }
1705}
1706
static void
bnx2_tx_int(struct bnx2 *bp)
{
	/* Reclaim completed TX descriptors: walk the ring from the
	 * software consumer to the hardware consumer index, unmapping
	 * and freeing each transmitted skb, then wake the TX queue if
	 * it was stopped and enough descriptors are now free.
	 */
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* The hw skips the slot at MAX_TX_DESC_CNT (the next-page BD),
	 * so bump past it when the index lands there.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim a TSO packet once ALL of its BDs
			 * (header + frags) have completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hw index so newly completed packets are
		 * reclaimed in this same pass.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked under netif_tx_lock to avoid racing with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1794
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	/* Recycle an rx buffer: hand @skb (and, when cons != prod, its
	 * DMA mapping and descriptor address) from consumer slot @cons
	 * back to producer slot @prod without reallocating.
	 */
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (partially synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the bus address into the producer's hardware descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1824
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	/* NAPI receive processing: consume up to @budget completed rx
	 * descriptors, validate each frame, deliver it up the stack
	 * (with VLAN acceleration when enabled), and refill/recycle the
	 * ring.  Returns the number of packets processed.
	 */
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* Skip the next-page BD slot, as in bnx2_tx_int(). */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; enough to read
		 * the l2_fhdr and small packets.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Drop the 4 trailing bytes (frame CRC) from the
		 * hardware-reported length.
		 */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer allocated: unmap and pass
			 * the original skb up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and count nothing.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100 is
		 * the 802.1Q ethertype, which adds 4 bytes).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hw checksum only for TCP/UDP frames with no
		 * checksum error flags set.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1974
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 *
 * Acks/masks the chip interrupt and hands the real work to the NAPI
 * poll routine (bnx2_poll) via netif_rx_schedule().
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the status block cache line before the poll routine reads it. */
	prefetch(bp->status_blk);

	/* Ack the interrupt and mask further ones; they are re-enabled by
	 * bnx2_poll() when all work has been processed.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1997
/* INTx ISR.  Unlike the MSI handler, this must first decide whether the
 * (possibly shared) interrupt line was really raised by this device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask the interrupt; bnx2_poll() re-enables it. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2027
Michael Chanf4e418f2005-11-04 08:53:48 -08002028static inline int
2029bnx2_has_work(struct bnx2 *bp)
2030{
2031 struct status_block *sblk = bp->status_blk;
2032
2033 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2034 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2035 return 1;
2036
2037 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2038 bp->link_up)
2039 return 1;
2040
2041 return 0;
2042}
2043
/* NAPI poll routine: handles link attention, TX completions and RX work,
 * then either re-enables interrupts (no more work) or asks to be polled
 * again.  Returns 0 when done, 1 to remain on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the attention bits and their ack copies
	 * means an unserviced link event.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed this device's remaining quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen before re-checking for
	 * work; the ISR compares against it to filter stale INTx.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two writes on purpose — the first keeps interrupts
		 * masked while updating the index, the second (without
		 * MASK_INT) re-enables them.  NOTE(review): presumably a
		 * hardware-required sequence for INTx; confirm with the
		 * chip documentation before changing.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2105
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode and RPM sort registers according to the
 * device flags: promiscuous, all-multicast, or a hashed multicast
 * filter built from the device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware only when no VLAN group is
	 * registered and ASF firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept everything: set all hash filter bits. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low CRC byte selects one of the
		 * 256 filter bits spread over the hash registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2180
Michael Chanfba9fe92006-06-12 22:21:25 -07002181#define FW_BUF_SIZE 0x8000
2182
/* Allocate the scratch resources used by bnx2_gunzip(): the output
 * buffer (bp->gunzip_buf), the zlib stream (bp->strm) and its inflate
 * workspace.  Returns 0 on success or -ENOMEM, releasing any partial
 * allocations via the goto-cleanup chain.
 */
static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
2211
2212static void
2213bnx2_gunzip_end(struct bnx2 *bp)
2214{
2215 kfree(bp->strm->workspace);
2216
2217 kfree(bp->strm);
2218 bp->strm = NULL;
2219
2220 if (bp->gunzip_buf) {
2221 vfree(bp->gunzip_buf);
2222 bp->gunzip_buf = NULL;
2223 }
2224}
2225
2226static int
2227bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2228{
2229 int n, rc;
2230
2231 /* check gzip header */
2232 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2233 return -EINVAL;
2234
2235 n = 10;
2236
2237#define FNAME 0x8
2238 if (zbuf[3] & FNAME)
2239 while ((zbuf[n++] != 0) && (n < len));
2240
2241 bp->strm->next_in = zbuf + n;
2242 bp->strm->avail_in = len - n;
2243 bp->strm->next_out = bp->gunzip_buf;
2244 bp->strm->avail_out = FW_BUF_SIZE;
2245
2246 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2247 if (rc != Z_OK)
2248 return rc;
2249
2250 rc = zlib_inflate(bp->strm, Z_FINISH);
2251
2252 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2253 *outbuf = bp->gunzip_buf;
2254
2255 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2256 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2257 bp->dev->name, bp->strm->msg);
2258
2259 zlib_inflateEnd(bp->strm);
2260
2261 if (rc == Z_STREAM_END)
2262 return 0;
2263
2264 return rc;
2265}
2266
/* Load an RV2P (receive path) processor image.  Instructions are
 * written two 32-bit words at a time (HIGH then LOW) followed by the
 * address/command write that commits them.  The processor is left in
 * reset; it is un-stalled later during chip init.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each 8-byte step consumes one instruction pair. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2299
Michael Chanaf3ee512006-11-19 14:09:25 -08002300static int
Michael Chanb6016b72005-05-26 13:03:09 -07002301load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2302{
2303 u32 offset;
2304 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002305 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002306
2307 /* Halt the CPU. */
2308 val = REG_RD_IND(bp, cpu_reg->mode);
2309 val |= cpu_reg->mode_value_halt;
2310 REG_WR_IND(bp, cpu_reg->mode, val);
2311 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2312
2313 /* Load the Text area. */
2314 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002315 if (fw->gz_text) {
2316 u32 text_len;
2317 void *text;
2318
2319 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2320 &text_len);
2321 if (rc)
2322 return rc;
2323
2324 fw->text = text;
2325 }
2326 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002327 int j;
2328
2329 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002330 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002331 }
2332 }
2333
2334 /* Load the Data area. */
2335 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2336 if (fw->data) {
2337 int j;
2338
2339 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2340 REG_WR_IND(bp, offset, fw->data[j]);
2341 }
2342 }
2343
2344 /* Load the SBSS area. */
2345 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2346 if (fw->sbss) {
2347 int j;
2348
2349 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2350 REG_WR_IND(bp, offset, fw->sbss[j]);
2351 }
2352 }
2353
2354 /* Load the BSS area. */
2355 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2356 if (fw->bss) {
2357 int j;
2358
2359 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2360 REG_WR_IND(bp, offset, fw->bss[j]);
2361 }
2362 }
2363
2364 /* Load the Read-Only area. */
2365 offset = cpu_reg->spad_base +
2366 (fw->rodata_addr - cpu_reg->mips_view_base);
2367 if (fw->rodata) {
2368 int j;
2369
2370 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2371 REG_WR_IND(bp, offset, fw->rodata[j]);
2372 }
2373 }
2374
2375 /* Clear the pre-fetch instruction. */
2376 REG_WR_IND(bp, cpu_reg->inst, 0);
2377 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2378
2379 /* Start the CPU. */
2380 val = REG_RD_IND(bp, cpu_reg->mode);
2381 val &= ~cpu_reg->mode_value_halt;
2382 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2383 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002384
2385 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002386}
2387
/* Initialize all on-chip processors: the two RV2P receive-path
 * processors, then the RX, TX, TX patch-up, Completion and (5709 only)
 * Command processors.  Firmware images are selected per chip family
 * (5709 vs 5706/5708) and loaded via load_cpu_fw().  Returns 0 or a
 * decompression/load error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Scratch buffers for firmware decompression. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor.  Only the 5709 has one. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2532
/* Transition the device between PCI power states.
 *
 * PCI_D0: wake the chip, clear magic/ACPI packet mode.
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 copper autoneg,
 * enable magic-packet receive and the multicast sort filters), notify
 * the bootcode firmware, then write the new power state to PMCSR.
 * Returns 0 on success, -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WoL
			 * link, preserving the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): 5706 A0/A1 only enter D3hot (state bits = 3)
		 * when WoL is armed — presumably a chip erratum; later
		 * revisions always do.  Confirm with the errata sheet.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2659
/* Acquire the hardware arbitration lock for the flash interface.
 * Polls the software-arbitration register until our grant bit (ARB2)
 * appears.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2681
/* Release the flash-interface arbitration lock taken by
 * bnx2_acquire_nvram_lock() and wait for the grant bit to clear.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2704
2705
/* Enable writes to the NVRAM.  For non-buffered flash parts a WREN
 * (write-enable) command must also be issued and polled to completion.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2734
/* Disable NVRAM writes by clearing the write-enable bits in MISC_CFG. */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
2743
2744
/* Grant the host access to the NVRAM interface. */
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
2755
/* Revoke host access to the NVRAM interface. */
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
2767
/* Erase one flash page at @offset.  Buffered flash needs no explicit
 * erase and returns immediately.  Returns 0 on success or -EBUSY if
 * the erase command does not complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2807
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored as
 * 4 big-endian-converted bytes).  @cmd_flags carries FIRST/LAST framing
 * bits for multi-word transactions.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts
	 * address by (page << page_bits) + offset-within-page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2853
2854
/* Write one 32-bit word (the 4 bytes at @val, converted to big-endian)
 * to NVRAM at @offset.  @cmd_flags carries FIRST/LAST framing bits for
 * multi-word transactions.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts
	 * address by (page << page_bits) + offset-within-page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2898
/* Identify the attached flash/EEPROM part and record it in bp->flash_info.
 *
 * Reads the NVM_CFG1 strapping register and matches it against flash_table.
 * If the flash interface has not yet been reconfigured (bit 30 clear), the
 * matching entry's config registers are programmed into the chip under the
 * NVRAM lock.  Also derives bp->flash_size, preferring the size advertised
 * in shared memory over the table's total_size.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against.
		 * NOTE(review): presumably the backup-strap indicator —
		 * confirm against the NVM_CFG1 register definition. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count when nothing
	 * matched; refuse to run with an unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size published by firmware in shared memory;
	 * fall back to the table's default when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2976
/* Read @buf_size bytes of NVRAM starting at byte offset @offset into
 * @ret_buf.
 *
 * The NVM block transfers whole 32-bit words, so this routine handles an
 * unaligned start (pre-read of the containing word), an unaligned tail
 * (@extra bytes discarded from the last word), and frames the burst with
 * BNX2_NVM_COMMAND_FIRST/LAST flags.  Acquires and releases the NVRAM
 * lock around the whole operation.
 *
 * Returns 0 on success or a negative error from the lock/word helpers.
 * NOTE(review): the two early "return rc" paths inside the unaligned-head
 * handling exit without releasing the NVRAM lock — verify against the
 * lock helper's semantics.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		/* Round down to the containing word and copy out only the
		 * bytes the caller asked for. */
		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* Request fits entirely in this one word. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Tail is unaligned: read a full final word and drop the
		 * trailing @extra bytes when copying out. */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* cmd_flags nonzero means FIRST was already issued above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle words go straight into ret_buf with no framing. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Last word via a bounce buffer so @extra pad bytes are
		 * never written past the caller's buffer. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3086
/* Write @buf_size bytes from @data_buf to NVRAM at byte offset @offset.
 *
 * Handles arbitrary alignment by pre-reading the partial head/tail words
 * into an allocated @align_buf, then programs the flash one page at a
 * time: for non-buffered parts each page is read into @flash_buffer,
 * erased, and rewritten with the preserved bytes around the new data.
 * The NVRAM lock and write-enable are taken and dropped per page.
 *
 * Returns 0 on success or a negative error; partial writes are possible
 * on error since earlier pages have already been committed.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range to the containing word
		 * and keep its current leading bytes in start[]. */
		offset32 &= ~3;
		len32 += (4 - align_start);
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end (unless the whole request fit inside the
		 * single word already handled above): widen and keep the
		 * trailing bytes in end[]. */
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	if (align_start || align_end) {
		/* Merge preserved head/tail bytes with the caller's data
		 * into one word-aligned scratch buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		/* Scratch space for one flash page on erase-before-write
		 * parts.  NOTE(review): 264 bytes — presumably sized for
		 * the largest page_size in flash_table; confirm. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST on the final word of the page, or — for
			 * buffered parts that never enter the write-back
			 * loops — on the final word of the data. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3268
3269static int
3270bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3271{
3272 u32 val;
3273 int i, rc = 0;
3274
3275 /* Wait for the current PCI transaction to complete before
3276 * issuing a reset. */
3277 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3278 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3279 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3280 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3281 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3282 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3283 udelay(5);
3284
Michael Chanb090ae22006-01-23 16:07:10 -08003285 /* Wait for the firmware to tell us it is ok to issue a reset. */
3286 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3287
Michael Chanb6016b72005-05-26 13:03:09 -07003288 /* Deposit a driver reset signature so the firmware knows that
3289 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003290 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003291 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3292
Michael Chanb6016b72005-05-26 13:03:09 -07003293 /* Do a dummy read to force the chip to complete all current transaction
3294 * before we issue a reset. */
3295 val = REG_RD(bp, BNX2_MISC_ID);
3296
Michael Chan234754d2006-11-19 14:11:41 -08003297 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3298 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3299 REG_RD(bp, BNX2_MISC_COMMAND);
3300 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003301
Michael Chan234754d2006-11-19 14:11:41 -08003302 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3303 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003304
Michael Chan234754d2006-11-19 14:11:41 -08003305 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003306
Michael Chan234754d2006-11-19 14:11:41 -08003307 } else {
3308 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3309 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3310 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3311
3312 /* Chip reset. */
3313 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3314
3315 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3316 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3317 current->state = TASK_UNINTERRUPTIBLE;
3318 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003319 }
Michael Chanb6016b72005-05-26 13:03:09 -07003320
Michael Chan234754d2006-11-19 14:11:41 -08003321 /* Reset takes approximate 30 usec */
3322 for (i = 0; i < 10; i++) {
3323 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3324 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3325 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3326 break;
3327 udelay(10);
3328 }
3329
3330 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3331 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3332 printk(KERN_ERR PFX "Chip reset did not complete\n");
3333 return -EBUSY;
3334 }
Michael Chanb6016b72005-05-26 13:03:09 -07003335 }
3336
3337 /* Make sure byte swapping is properly configured. */
3338 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3339 if (val != 0x01020304) {
3340 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3341 return -ENODEV;
3342 }
3343
Michael Chanb6016b72005-05-26 13:03:09 -07003344 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003345 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3346 if (rc)
3347 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003348
3349 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3350 /* Adjust the voltage regular to two steps lower. The default
3351 * of this register is 0x0000000e. */
3352 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3353
3354 /* Remove bad rbuf memory from the free pool. */
3355 rc = bnx2_alloc_bad_rbuf(bp);
3356 }
3357
3358 return rc;
3359}
3360
/* Bring the chip from post-reset state to operational: program DMA
 * byte-swapping, load the internal CPU firmware, configure the memory
 * queue / page sizes / MTU / host-coalescing parameters, and complete the
 * WAIT2 firmware handshake.
 *
 * Returns 0 on success or a negative error from bnx2_init_cpus() or
 * bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA swap control; the extra CNTL_BYTE_SWAP is only needed on
	 * big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X.
	 * NOTE(review): bits 20/11/23 are undocumented here — meaning
	 * should be confirmed against the register spec. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering enable in the PCI-X command
		 * register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Download firmware to the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and statistics
	 * DMA blocks live. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: high half of each register is
	 * the during-interrupt value, low half the normal value. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read flushes the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3525
Michael Chan59b47d82006-11-19 14:10:45 -08003526static void
3527bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3528{
3529 u32 val, offset0, offset1, offset2, offset3;
3530
3531 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3532 offset0 = BNX2_L2CTX_TYPE_XI;
3533 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3534 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3535 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3536 } else {
3537 offset0 = BNX2_L2CTX_TYPE;
3538 offset1 = BNX2_L2CTX_CMD_TYPE;
3539 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3540 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3541 }
3542 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3544
3545 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3546 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3547
3548 val = (u64) bp->tx_desc_mapping >> 32;
3549 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3550
3551 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3552 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3553}
Michael Chanb6016b72005-05-26 13:03:09 -07003554
/* Initialize the software TX ring state and its chip context.
 *
 * The last descriptor slot is used as a "chain" bd pointing back to the
 * start of the ring, making the ring circular in hardware.  Also caches
 * the mailbox doorbell addresses for the producer index/bseq and sets the
 * wake threshold to half the ring.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Chain bd: last slot points at the ring's own base address. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the doorbell mailbox addresses for the fast path. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3579
/* Initialize the RX descriptor ring(s), chain the ring pages together,
 * program the RX context, pre-fill the ring with skbs, and ring the
 * producer doorbells.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Fill every usable bd of this page; the final bd (left
		 * over after the loop) becomes the chain pointer. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last page chains back to page 0, closing the circle. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* RX context: bd-chain type plus the address of the first page. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-post receive buffers; stop early on allocation failure and
	 * run with whatever was posted. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3639
3640static void
Michael Chan13daffa2006-03-20 17:49:20 -08003641bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3642{
3643 u32 num_rings, max;
3644
3645 bp->rx_ring_size = size;
3646 num_rings = 1;
3647 while (size > MAX_RX_DESC_CNT) {
3648 size -= MAX_RX_DESC_CNT;
3649 num_rings++;
3650 }
3651 /* round to next power of 2 */
3652 max = MAX_RX_RINGS;
3653 while ((max & num_rings) == 0)
3654 max >>= 1;
3655
3656 if (num_rings != max)
3657 max <<= 1;
3658
3659 bp->rx_max_ring = max;
3660 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3661}
3662
/* Free every skb still queued on the TX ring and undo its DMA mappings.
 *
 * Each packet occupies one head slot (mapped with pci_map_single) plus
 * one slot per page fragment (mapped with pci_map_page), so the index
 * advances by nr_frags + 1 after a packet is released.  Safe to call
 * when the ring was never allocated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: just move on. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (header) part of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment, stored in the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and all fragment slots. */
		i += j + 1;
	}

}
3699
/* Free every skb posted on the RX ring and undo its DMA mapping.
 * Safe to call when the ring was never allocated.
 *
 * NOTE(review): the loop bound is rx_max_ring_idx (total entries - 1);
 * presumably the final index is a chain bd that never holds an skb —
 * confirm against RX_RING_IDX()/bnx2_alloc_rx_skb().
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
3723
/* Release all socket buffers held by the driver: the TX ring first,
 * then the RX ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3730
/* Reset the chip and rebuild the rings.
 *
 * The skbs are freed even when the chip reset fails, so no buffers leak;
 * on success the chip is re-initialized and fresh TX/RX rings are set up.
 * @reset_code is passed through to the firmware handshake in
 * bnx2_reset_chip().  Returns 0 on success or a negative error.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3748
/* Full NIC bring-up: reset the chip/rings, initialize the PHY (under
 * phy_lock, as required by the PHY access paths), and update the link
 * state.  Returns 0 on success or the error from bnx2_reset_nic().
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
3763
3764static int
3765bnx2_test_registers(struct bnx2 *bp)
3766{
3767 int ret;
3768 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003769 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003770 u16 offset;
3771 u16 flags;
3772 u32 rw_mask;
3773 u32 ro_mask;
3774 } reg_tbl[] = {
3775 { 0x006c, 0, 0x00000000, 0x0000003f },
3776 { 0x0090, 0, 0xffffffff, 0x00000000 },
3777 { 0x0094, 0, 0x00000000, 0x00000000 },
3778
3779 { 0x0404, 0, 0x00003f00, 0x00000000 },
3780 { 0x0418, 0, 0x00000000, 0xffffffff },
3781 { 0x041c, 0, 0x00000000, 0xffffffff },
3782 { 0x0420, 0, 0x00000000, 0x80ffffff },
3783 { 0x0424, 0, 0x00000000, 0x00000000 },
3784 { 0x0428, 0, 0x00000000, 0x00000001 },
3785 { 0x0450, 0, 0x00000000, 0x0000ffff },
3786 { 0x0454, 0, 0x00000000, 0xffffffff },
3787 { 0x0458, 0, 0x00000000, 0xffffffff },
3788
3789 { 0x0808, 0, 0x00000000, 0xffffffff },
3790 { 0x0854, 0, 0x00000000, 0xffffffff },
3791 { 0x0868, 0, 0x00000000, 0x77777777 },
3792 { 0x086c, 0, 0x00000000, 0x77777777 },
3793 { 0x0870, 0, 0x00000000, 0x77777777 },
3794 { 0x0874, 0, 0x00000000, 0x77777777 },
3795
3796 { 0x0c00, 0, 0x00000000, 0x00000001 },
3797 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3798 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003799
3800 { 0x1000, 0, 0x00000000, 0x00000001 },
3801 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07003802
3803 { 0x1408, 0, 0x01c00800, 0x00000000 },
3804 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3805 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003806 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003807 { 0x14b0, 0, 0x00000002, 0x00000001 },
3808 { 0x14b8, 0, 0x00000000, 0x00000000 },
3809 { 0x14c0, 0, 0x00000000, 0x00000009 },
3810 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3811 { 0x14cc, 0, 0x00000000, 0x00000001 },
3812 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003813
3814 { 0x1800, 0, 0x00000000, 0x00000001 },
3815 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07003816
3817 { 0x2800, 0, 0x00000000, 0x00000001 },
3818 { 0x2804, 0, 0x00000000, 0x00003f01 },
3819 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3820 { 0x2810, 0, 0xffff0000, 0x00000000 },
3821 { 0x2814, 0, 0xffff0000, 0x00000000 },
3822 { 0x2818, 0, 0xffff0000, 0x00000000 },
3823 { 0x281c, 0, 0xffff0000, 0x00000000 },
3824 { 0x2834, 0, 0xffffffff, 0x00000000 },
3825 { 0x2840, 0, 0x00000000, 0xffffffff },
3826 { 0x2844, 0, 0x00000000, 0xffffffff },
3827 { 0x2848, 0, 0xffffffff, 0x00000000 },
3828 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3829
3830 { 0x2c00, 0, 0x00000000, 0x00000011 },
3831 { 0x2c04, 0, 0x00000000, 0x00030007 },
3832
Michael Chanb6016b72005-05-26 13:03:09 -07003833 { 0x3c00, 0, 0x00000000, 0x00000001 },
3834 { 0x3c04, 0, 0x00000000, 0x00070000 },
3835 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3836 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3837 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3838 { 0x3c14, 0, 0x00000000, 0xffffffff },
3839 { 0x3c18, 0, 0x00000000, 0xffffffff },
3840 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3841 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003842
3843 { 0x5004, 0, 0x00000000, 0x0000007f },
3844 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3845 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3846
Michael Chanb6016b72005-05-26 13:03:09 -07003847 { 0x5c00, 0, 0x00000000, 0x00000001 },
3848 { 0x5c04, 0, 0x00000000, 0x0003000f },
3849 { 0x5c08, 0, 0x00000003, 0x00000000 },
3850 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3851 { 0x5c10, 0, 0x00000000, 0xffffffff },
3852 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3853 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3854 { 0x5c88, 0, 0x00000000, 0x00077373 },
3855 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3856
3857 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3858 { 0x680c, 0, 0xffffffff, 0x00000000 },
3859 { 0x6810, 0, 0xffffffff, 0x00000000 },
3860 { 0x6814, 0, 0xffffffff, 0x00000000 },
3861 { 0x6818, 0, 0xffffffff, 0x00000000 },
3862 { 0x681c, 0, 0xffffffff, 0x00000000 },
3863 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3865 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3866 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3869 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3870 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3871 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3872 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3873 { 0x684c, 0, 0xffffffff, 0x00000000 },
3874 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3877 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3878 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3879 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3880
3881 { 0xffff, 0, 0x00000000, 0x00000000 },
3882 };
3883
3884 ret = 0;
3885 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3886 u32 offset, rw_mask, ro_mask, save_val, val;
3887
3888 offset = (u32) reg_tbl[i].offset;
3889 rw_mask = reg_tbl[i].rw_mask;
3890 ro_mask = reg_tbl[i].ro_mask;
3891
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003892 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003893
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003894 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003895
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003896 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003897 if ((val & rw_mask) != 0) {
3898 goto reg_test_err;
3899 }
3900
3901 if ((val & ro_mask) != (save_val & ro_mask)) {
3902 goto reg_test_err;
3903 }
3904
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003905 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003906
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003907 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003908 if ((val & rw_mask) != rw_mask) {
3909 goto reg_test_err;
3910 }
3911
3912 if ((val & ro_mask) != (save_val & ro_mask)) {
3913 goto reg_test_err;
3914 }
3915
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003916 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003917 continue;
3918
3919reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003920 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003921 ret = -ENODEV;
3922 break;
3923 }
3924 return ret;
3925}
3926
3927static int
3928bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3929{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003930 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003931 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3932 int i;
3933
3934 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3935 u32 offset;
3936
3937 for (offset = 0; offset < size; offset += 4) {
3938
3939 REG_WR_IND(bp, start + offset, test_pattern[i]);
3940
3941 if (REG_RD_IND(bp, start + offset) !=
3942 test_pattern[i]) {
3943 return -ENODEV;
3944 }
3945 }
3946 }
3947 return 0;
3948}
3949
3950static int
3951bnx2_test_memory(struct bnx2 *bp)
3952{
3953 int ret = 0;
3954 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003955 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003956 u32 offset;
3957 u32 len;
3958 } mem_tbl[] = {
3959 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003960 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003961 { 0xe0000, 0x4000 },
3962 { 0x120000, 0x4000 },
3963 { 0x1a0000, 0x4000 },
3964 { 0x160000, 0x4000 },
3965 { 0xffffffff, 0 },
3966 };
3967
3968 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3969 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3970 mem_tbl[i].len)) != 0) {
3971 return ret;
3972 }
3973 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003974
Michael Chanb6016b72005-05-26 13:03:09 -07003975 return ret;
3976}
3977
Michael Chanbc5a0692006-01-23 16:13:22 -08003978#define BNX2_MAC_LOOPBACK 0
3979#define BNX2_PHY_LOOPBACK 1
3980
/* Send one self-addressed test frame through an internal loopback path
 * (MAC or PHY level) and verify it comes back intact on the rx ring.
 *
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK.
 *
 * Returns 0 if the frame was received without error flags and with the
 * expected length and payload; -EINVAL for an unknown mode; -ENOMEM if
 * the test skb cannot be allocated; -ENODEV on any loopback failure.
 *
 * Caller is expected to have reset/reinitialized the NIC (see
 * bnx2_test_loopback()); this function drives the rings directly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size frame: our own MAC as destination, zeroed
	 * source/type area, then a deterministic byte ramp as payload
	 * that is checked on receive.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without an interrupt) so that
	 * rx_start_idx below reflects the current rx consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post a single tx buffer descriptor for the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index + byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update so the consumer indices are current.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx consumer must have caught up to the producer. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on the rx ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip sits at the start of the rx
	 * buffer, before the reserved offset.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Fail on any receive error flag reported by the chip. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Reported length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte ramp survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4099
Michael Chanbc5a0692006-01-23 16:13:22 -08004100#define BNX2_MAC_LOOPBACK_FAILED 1
4101#define BNX2_PHY_LOOPBACK_FAILED 2
4102#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4103 BNX2_PHY_LOOPBACK_FAILED)
4104
4105static int
4106bnx2_test_loopback(struct bnx2 *bp)
4107{
4108 int rc = 0;
4109
4110 if (!netif_running(bp->dev))
4111 return BNX2_LOOPBACK_FAILED;
4112
4113 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4114 spin_lock_bh(&bp->phy_lock);
4115 bnx2_init_phy(bp);
4116 spin_unlock_bh(&bp->phy_lock);
4117 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4118 rc |= BNX2_MAC_LOOPBACK_FAILED;
4119 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4120 rc |= BNX2_PHY_LOOPBACK_FAILED;
4121 return rc;
4122}
4123
Michael Chanb6016b72005-05-26 13:03:09 -07004124#define NVRAM_SIZE 0x200
4125#define CRC32_RESIDUAL 0xdebb20e3
4126
4127static int
4128bnx2_test_nvram(struct bnx2 *bp)
4129{
4130 u32 buf[NVRAM_SIZE / 4];
4131 u8 *data = (u8 *) buf;
4132 int rc = 0;
4133 u32 magic, csum;
4134
4135 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4136 goto test_nvram_done;
4137
4138 magic = be32_to_cpu(buf[0]);
4139 if (magic != 0x669955aa) {
4140 rc = -ENODEV;
4141 goto test_nvram_done;
4142 }
4143
4144 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4145 goto test_nvram_done;
4146
4147 csum = ether_crc_le(0x100, data);
4148 if (csum != CRC32_RESIDUAL) {
4149 rc = -ENODEV;
4150 goto test_nvram_done;
4151 }
4152
4153 csum = ether_crc_le(0x100, data + 0x100);
4154 if (csum != CRC32_RESIDUAL) {
4155 rc = -ENODEV;
4156 }
4157
4158test_nvram_done:
4159 return rc;
4160}
4161
4162static int
4163bnx2_test_link(struct bnx2 *bp)
4164{
4165 u32 bmsr;
4166
Michael Chanc770a652005-08-25 15:38:39 -07004167 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004168 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4169 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004170 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004171
Michael Chanb6016b72005-05-26 13:03:09 -07004172 if (bmsr & BMSR_LSTATUS) {
4173 return 0;
4174 }
4175 return -ENODEV;
4176}
4177
4178static int
4179bnx2_test_intr(struct bnx2 *bp)
4180{
4181 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004182 u16 status_idx;
4183
4184 if (!netif_running(bp->dev))
4185 return -ENODEV;
4186
4187 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4188
4189 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004190 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004191 REG_RD(bp, BNX2_HC_COMMAND);
4192
4193 for (i = 0; i < 10; i++) {
4194 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4195 status_idx) {
4196
4197 break;
4198 }
4199
4200 msleep_interruptible(10);
4201 }
4202 if (i < 10)
4203 return 0;
4204
4205 return -ENODEV;
4206}
4207
/* Per-timer-tick SerDes link maintenance for the 5706.
 *
 * If autonegotiation is enabled but the link is down, look for a link
 * partner that is not autonegotiating (signal detected but no CONFIG
 * codes received) and, if found, force 1000 Mbps full duplex
 * ("parallel detect").  Conversely, if the link came up via parallel
 * detect and CONFIG codes are now being received, re-enable
 * autonegotiation.
 *
 * Runs in timer (softirq) context; takes phy_lock with spin_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Registers 0x1c/0x17/0x15 are vendor-specific
			 * shadow/expansion registers; the writes select
			 * which status word the subsequent read returns.
			 * NOTE(review): phy2 is read twice — presumably
			 * the first read returns latched state; confirm
			 * against the BCM5706S PHY datasheet.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner not autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up via parallel detect; if CONFIG codes are now
		 * seen, the partner started autonegotiating — rejoin it.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4262
/* Per-timer-tick SerDes link maintenance for the 5708.
 *
 * On a 2.5G-capable SerDes that cannot get link via autonegotiation,
 * alternate between forcing 2.5 Gbps full duplex and re-enabling
 * autoneg (giving each attempt a grace period) until link comes up.
 *
 * Runs in timer (softirq) context; takes phy_lock with spin_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to toggle unless the SerDes supports forced 2.5G. */
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still inside the grace period of the last attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced link: force 2.5G/full
			 * and shorten the timer for the next check.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not produce link either: go back
			 * to autoneg and wait two timer ticks before
			 * re-evaluating.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4297
4298static void
Michael Chanb6016b72005-05-26 13:03:09 -07004299bnx2_timer(unsigned long data)
4300{
4301 struct bnx2 *bp = (struct bnx2 *) data;
4302 u32 msg;
4303
Michael Chancd339a02005-08-25 15:35:24 -07004304 if (!netif_running(bp->dev))
4305 return;
4306
Michael Chanb6016b72005-05-26 13:03:09 -07004307 if (atomic_read(&bp->intr_sem) != 0)
4308 goto bnx2_restart_timer;
4309
4310 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004311 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004312
Michael Chancea94db2006-06-12 22:16:13 -07004313 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4314
Michael Chanf8dd0642006-11-19 14:08:29 -08004315 if (bp->phy_flags & PHY_SERDES_FLAG) {
4316 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4317 bnx2_5706_serdes_timer(bp);
4318 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4319 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004320 }
4321
4322bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004323 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004324}
4325
4326/* Called with rtnl_lock */
/* Bring the interface up.  Called with rtnl_lock held.
 *
 * Powers the device to D0, allocates rings/status/stats memory,
 * requests the IRQ (preferring MSI where usable), initializes the NIC,
 * starts the driver timer and enables interrupts.  Because some
 * chipsets deliver no MSI at all, MSI is verified with a generated
 * test interrupt and the driver falls back to INTx if it fails.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is not used on 5706 A0/A1 — presumably a chip erratum on
	 * those steppings; confirm against the 5706 errata list.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			/* MSI could not be enabled: use shared INTx. */
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind IRQ, MSI and memory on init failure. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI and reinitialize with INTx. */
			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				/* Timer was already armed above. */
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4421
/* Deferred reset handler, scheduled from bnx2_tx_timeout().
 *
 * Stops the interface, reinitializes the NIC and restarts it.  The
 * in_reset_task flag is polled by bnx2_close() so that close waits for
 * an in-progress reset instead of calling flush_scheduled_work()
 * (which could deadlock on rtnl_lock — see comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold interrupts off until bnx2_netif_start() re-enables them. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4439
/* net_device watchdog callback: a tx has stalled.  Defer the actual
 * chip reset to process context via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4448
4449#ifdef BCM_VLAN
4450/* Called with rtnl_lock */
/* Attach a VLAN group to the device.  Called with rtnl_lock held.
 *
 * The interface is quiesced while the new group is recorded and the
 * chip rx mode is reprogrammed to match.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4463
4464/* Called with rtnl_lock */
/* Remove one VLAN id from the active group.  Called with rtnl_lock held.
 *
 * The interface is quiesced while the vid slot is cleared and the chip
 * rx mode is reprogrammed.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4478#endif
4479
Herbert Xu932ff272006-06-09 12:20:56 -07004480/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004481 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4482 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004483 */
4484static int
4485bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4486{
Michael Chan972ec0d2006-01-23 16:12:43 -08004487 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004488 dma_addr_t mapping;
4489 struct tx_bd *txbd;
4490 struct sw_bd *tx_buf;
4491 u32 len, vlan_tag_flags, last_frag, mss;
4492 u16 prod, ring_prod;
4493 int i;
4494
Michael Chane89bbf12005-08-25 15:36:58 -07004495 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004496 netif_stop_queue(dev);
4497 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4498 dev->name);
4499
4500 return NETDEV_TX_BUSY;
4501 }
4502 len = skb_headlen(skb);
4503 prod = bp->tx_prod;
4504 ring_prod = TX_RING_IDX(prod);
4505
4506 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004507 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004508 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4509 }
4510
4511 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4512 vlan_tag_flags |=
4513 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4514 }
Herbert Xu79671682006-06-22 02:40:14 -07004515 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004516 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4517 u32 tcp_opt_len, ip_tcp_len;
4518
4519 if (skb_header_cloned(skb) &&
4520 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4521 dev_kfree_skb(skb);
4522 return NETDEV_TX_OK;
4523 }
4524
4525 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4526 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4527
4528 tcp_opt_len = 0;
4529 if (skb->h.th->doff > 5) {
4530 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4531 }
4532 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4533
4534 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004535 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004536 skb->h.th->check =
4537 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4538 skb->nh.iph->daddr,
4539 0, IPPROTO_TCP, 0);
4540
4541 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4542 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4543 (tcp_opt_len >> 2)) << 8;
4544 }
4545 }
4546 else
Michael Chanb6016b72005-05-26 13:03:09 -07004547 {
4548 mss = 0;
4549 }
4550
4551 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004552
Michael Chanb6016b72005-05-26 13:03:09 -07004553 tx_buf = &bp->tx_buf_ring[ring_prod];
4554 tx_buf->skb = skb;
4555 pci_unmap_addr_set(tx_buf, mapping, mapping);
4556
4557 txbd = &bp->tx_desc_ring[ring_prod];
4558
4559 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4560 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4561 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4562 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4563
4564 last_frag = skb_shinfo(skb)->nr_frags;
4565
4566 for (i = 0; i < last_frag; i++) {
4567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4568
4569 prod = NEXT_TX_BD(prod);
4570 ring_prod = TX_RING_IDX(prod);
4571 txbd = &bp->tx_desc_ring[ring_prod];
4572
4573 len = frag->size;
4574 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4575 len, PCI_DMA_TODEVICE);
4576 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4577 mapping, mapping);
4578
4579 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4580 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4581 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4582 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4583
4584 }
4585 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4586
4587 prod = NEXT_TX_BD(prod);
4588 bp->tx_prod_bseq += skb->len;
4589
Michael Chan234754d2006-11-19 14:11:41 -08004590 REG_WR16(bp, bp->tx_bidx_addr, prod);
4591 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004592
4593 mmiowb();
4594
4595 bp->tx_prod = prod;
4596 dev->trans_start = jiffies;
4597
Michael Chane89bbf12005-08-25 15:36:58 -07004598 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004599 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004600 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004601 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004602 }
4603
4604 return NETDEV_TX_OK;
4605}
4606
4607/* Called with rtnl_lock */
/* Bring the interface down.  Called with rtnl_lock held.
 *
 * Waits out any in-flight reset_task, quiesces the interface, tells
 * the firmware how the driver is unloading (link-down vs. WOL vs.
 * no-WOL suspend), releases the IRQ/MSI and all ring memory, and
 * finally drops the device to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the wake-on-LAN setup. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4642
/* Read a 64-bit hardware statistics counter split into _hi/_lo halves.
 * Fix: the whole expansion is now parenthesized so the macro is safe
 * inside larger expressions (the old `A + B` form misparsed under any
 * higher-precedence neighbor, e.g. `GET_NET_STATS64(x) * 2`).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of the counter are reported. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4655
4656static struct net_device_stats *
4657bnx2_get_stats(struct net_device *dev)
4658{
Michael Chan972ec0d2006-01-23 16:12:43 -08004659 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004660 struct statistics_block *stats_blk = bp->stats_blk;
4661 struct net_device_stats *net_stats = &bp->net_stats;
4662
4663 if (bp->stats_blk == NULL) {
4664 return net_stats;
4665 }
4666 net_stats->rx_packets =
4667 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4668 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4670
4671 net_stats->tx_packets =
4672 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4673 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4674 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4675
4676 net_stats->rx_bytes =
4677 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4678
4679 net_stats->tx_bytes =
4680 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4681
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004682 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004683 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4684
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004685 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004686 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4687
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004688 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004689 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4690 stats_blk->stat_EtherStatsOverrsizePkts);
4691
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004692 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004693 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4694
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004695 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004696 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4697
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004698 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004699 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4700
4701 net_stats->rx_errors = net_stats->rx_length_errors +
4702 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4703 net_stats->rx_crc_errors;
4704
4705 net_stats->tx_aborted_errors =
4706 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4707 stats_blk->stat_Dot3StatsLateCollisions);
4708
Michael Chan5b0c76a2005-11-04 08:45:49 -08004709 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4710 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07004711 net_stats->tx_carrier_errors = 0;
4712 else {
4713 net_stats->tx_carrier_errors =
4714 (unsigned long)
4715 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4716 }
4717
4718 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004719 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07004720 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4721 +
4722 net_stats->tx_aborted_errors +
4723 net_stats->tx_carrier_errors;
4724
Michael Chancea94db2006-06-12 22:16:13 -07004725 net_stats->rx_missed_errors =
4726 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4727 stats_blk->stat_FwRxDrop);
4728
Michael Chanb6016b72005-05-26 13:03:09 -07004729 return net_stats;
4730}
4731
4732/* All ethtool functions called with rtnl_lock */
4733
4734static int
4735bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4736{
Michael Chan972ec0d2006-01-23 16:12:43 -08004737 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004738
4739 cmd->supported = SUPPORTED_Autoneg;
4740 if (bp->phy_flags & PHY_SERDES_FLAG) {
4741 cmd->supported |= SUPPORTED_1000baseT_Full |
4742 SUPPORTED_FIBRE;
4743
4744 cmd->port = PORT_FIBRE;
4745 }
4746 else {
4747 cmd->supported |= SUPPORTED_10baseT_Half |
4748 SUPPORTED_10baseT_Full |
4749 SUPPORTED_100baseT_Half |
4750 SUPPORTED_100baseT_Full |
4751 SUPPORTED_1000baseT_Full |
4752 SUPPORTED_TP;
4753
4754 cmd->port = PORT_TP;
4755 }
4756
4757 cmd->advertising = bp->advertising;
4758
4759 if (bp->autoneg & AUTONEG_SPEED) {
4760 cmd->autoneg = AUTONEG_ENABLE;
4761 }
4762 else {
4763 cmd->autoneg = AUTONEG_DISABLE;
4764 }
4765
4766 if (netif_carrier_ok(dev)) {
4767 cmd->speed = bp->line_speed;
4768 cmd->duplex = bp->duplex;
4769 }
4770 else {
4771 cmd->speed = -1;
4772 cmd->duplex = -1;
4773 }
4774
4775 cmd->transceiver = XCVR_INTERNAL;
4776 cmd->phy_address = bp->phy_addr;
4777
4778 return 0;
4779}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004780
/* ethtool set_settings handler; called with rtnl_lock held.
 * Validates the requested autoneg/speed/duplex combination against
 * the port type (SerDes vs copper), then commits the new settings to
 * *bp and reprograms the PHY under phy_lock.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing in *bp changes until all
	 * validation has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are meaningless on a fibre port */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000-half is not supported by the hardware */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes only supports forced 1000/2500 full,
			 * and 2500 only on 2.5G-capable PHYs.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 is not allowed on copper */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4856
4857static void
4858bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4859{
Michael Chan972ec0d2006-01-23 16:12:43 -08004860 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004861
4862 strcpy(info->driver, DRV_MODULE_NAME);
4863 strcpy(info->version, DRV_MODULE_VERSION);
4864 strcpy(info->bus_info, pci_name(bp->pdev));
4865 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4866 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4867 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004868 info->fw_version[1] = info->fw_version[3] = '.';
4869 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004870}
4871
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: report the fixed dump size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4879
4880static void
4881bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4882{
4883 u32 *p = _p, i, offset;
4884 u8 *orig_p = _p;
4885 struct bnx2 *bp = netdev_priv(dev);
4886 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4887 0x0800, 0x0880, 0x0c00, 0x0c10,
4888 0x0c30, 0x0d08, 0x1000, 0x101c,
4889 0x1040, 0x1048, 0x1080, 0x10a4,
4890 0x1400, 0x1490, 0x1498, 0x14f0,
4891 0x1500, 0x155c, 0x1580, 0x15dc,
4892 0x1600, 0x1658, 0x1680, 0x16d8,
4893 0x1800, 0x1820, 0x1840, 0x1854,
4894 0x1880, 0x1894, 0x1900, 0x1984,
4895 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4896 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4897 0x2000, 0x2030, 0x23c0, 0x2400,
4898 0x2800, 0x2820, 0x2830, 0x2850,
4899 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4900 0x3c00, 0x3c94, 0x4000, 0x4010,
4901 0x4080, 0x4090, 0x43c0, 0x4458,
4902 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4903 0x4fc0, 0x5010, 0x53c0, 0x5444,
4904 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4905 0x5fc0, 0x6000, 0x6400, 0x6428,
4906 0x6800, 0x6848, 0x684c, 0x6860,
4907 0x6888, 0x6910, 0x8000 };
4908
4909 regs->version = 0;
4910
4911 memset(p, 0, BNX2_REGDUMP_LEN);
4912
4913 if (!netif_running(bp->dev))
4914 return;
4915
4916 i = 0;
4917 offset = reg_boundaries[0];
4918 p += offset;
4919 while (offset < BNX2_REGDUMP_LEN) {
4920 *p++ = REG_RD(bp, offset);
4921 offset += 4;
4922 if (offset == reg_boundaries[i + 1]) {
4923 offset = reg_boundaries[i + 2];
4924 p = (u32 *) (orig_p + offset);
4925 i += 2;
4926 }
4927 }
4928}
4929
Michael Chanb6016b72005-05-26 13:03:09 -07004930static void
4931bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4932{
Michael Chan972ec0d2006-01-23 16:12:43 -08004933 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004934
4935 if (bp->flags & NO_WOL_FLAG) {
4936 wol->supported = 0;
4937 wol->wolopts = 0;
4938 }
4939 else {
4940 wol->supported = WAKE_MAGIC;
4941 if (bp->wol)
4942 wol->wolopts = WAKE_MAGIC;
4943 else
4944 wol->wolopts = 0;
4945 }
4946 memset(&wol->sopass, 0, sizeof(wol->sopass));
4947}
4948
4949static int
4950bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4951{
Michael Chan972ec0d2006-01-23 16:12:43 -08004952 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004953
4954 if (wol->wolopts & ~WAKE_MAGIC)
4955 return -EINVAL;
4956
4957 if (wol->wolopts & WAKE_MAGIC) {
4958 if (bp->flags & NO_WOL_FLAG)
4959 return -EINVAL;
4960
4961 bp->wol = 1;
4962 }
4963 else {
4964 bp->wol = 0;
4965 }
4966 return 0;
4967}
4968
/* ethtool nway_reset handler: restart autonegotiation.  Only valid
 * when autoneg is enabled.  On SerDes ports the link is first forced
 * down via loopback so the peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be
		 * called with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the timer that polls SerDes autoneg completion. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5003
5004static int
5005bnx2_get_eeprom_len(struct net_device *dev)
5006{
Michael Chan972ec0d2006-01-23 16:12:43 -08005007 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005008
Michael Chan1122db72006-01-23 16:11:42 -08005009 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005010 return 0;
5011
Michael Chan1122db72006-01-23 16:11:42 -08005012 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005013}
5014
5015static int
5016bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5017 u8 *eebuf)
5018{
Michael Chan972ec0d2006-01-23 16:12:43 -08005019 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005020 int rc;
5021
John W. Linville1064e942005-11-10 12:58:24 -08005022 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005023
5024 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5025
5026 return rc;
5027}
5028
5029static int
5030bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5031 u8 *eebuf)
5032{
Michael Chan972ec0d2006-01-23 16:12:43 -08005033 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005034 int rc;
5035
John W. Linville1064e942005-11-10 12:58:24 -08005036 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005037
5038 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5039
5040 return rc;
5041}
5042
/* ethtool get_coalesce handler: report the current interrupt
 * coalescing parameters cached in *bp.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero first so unsupported fields read back as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5064
/* ethtool set_coalesce handler: store the requested coalescing
 * parameters, clamped to the hardware field widths (tick counters are
 * 10-bit, frame counters 8-bit), then restart the NIC if it is up so
 * the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Stats ticks: clamp to 24 bits and round down to a multiple
	 * of 0x100 (low byte must be zero for the hardware).
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5108
5109static void
5110bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5111{
Michael Chan972ec0d2006-01-23 16:12:43 -08005112 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005113
Michael Chan13daffa2006-03-20 17:49:20 -08005114 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005115 ering->rx_mini_max_pending = 0;
5116 ering->rx_jumbo_max_pending = 0;
5117
5118 ering->rx_pending = bp->rx_ring_size;
5119 ering->rx_mini_pending = 0;
5120 ering->rx_jumbo_pending = 0;
5121
5122 ering->tx_max_pending = MAX_TX_DESC_CNT;
5123 ering->tx_pending = bp->tx_ring_size;
5124}
5125
/* ethtool set_ringparam handler: resize the RX/TX rings.  If the
 * interface is up, the chip is reset and ring memory freed before the
 * new sizes are applied, then reallocated and restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX must have room for a maximally-fragmented skb plus one. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is
		 * left stopped with no rings — caller sees the error.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5159
5160static void
5161bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5162{
Michael Chan972ec0d2006-01-23 16:12:43 -08005163 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005164
5165 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5166 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5167 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5168}
5169
5170static int
5171bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5172{
Michael Chan972ec0d2006-01-23 16:12:43 -08005173 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005174
5175 bp->req_flow_ctrl = 0;
5176 if (epause->rx_pause)
5177 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5178 if (epause->tx_pause)
5179 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5180
5181 if (epause->autoneg) {
5182 bp->autoneg |= AUTONEG_FLOW_CTRL;
5183 }
5184 else {
5185 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5186 }
5187
Michael Chanc770a652005-08-25 15:38:39 -07005188 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005189
5190 bnx2_setup_phy(bp);
5191
Michael Chanc770a652005-08-25 15:38:39 -07005192 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005193
5194 return 0;
5195}
5196
5197static u32
5198bnx2_get_rx_csum(struct net_device *dev)
5199{
Michael Chan972ec0d2006-01-23 16:12:43 -08005200 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005201
5202 return bp->rx_csum;
5203}
5204
5205static int
5206bnx2_set_rx_csum(struct net_device *dev, u32 data)
5207{
Michael Chan972ec0d2006-01-23 16:12:43 -08005208 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005209
5210 bp->rx_csum = data;
5211 return 0;
5212}
5213
Michael Chanb11d6212006-06-29 12:31:21 -07005214static int
5215bnx2_set_tso(struct net_device *dev, u32 data)
5216{
5217 if (data)
5218 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5219 else
5220 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5221 return 0;
5222}
5223
/* Number of counters exported through ethtool -S; all four tables
 * below (names, offsets, and the two length arrays) must stay in
 * lockstep with this count.
 */
#define BNX2_NUM_STATS 46

/* Counter names reported for ETH_SS_STATS, in the same order as
 * bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5276
/* Offset of a statistics_block field, in 32-bit words, for indexing
 * the raw hardware stats block as a u32 array.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each exported counter within the hardware stats
 * block; same order as bnx2_stats_str_arr.  For 8-byte counters the
 * offset points at the _hi word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5327
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skip) for 5706 A0-A2 and
 * 5708 A0 chips; indexed in the same order as bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5338
/* Per-counter width in bytes for later chip revisions; unlike the
 * 5706 table, stat_Dot3StatsCarrierSenseErrors (index 11) is valid.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5346
/* Number of self-tests run by bnx2_self_test(); must match both the
 * name table below and the buf[] indices used in bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Test names reported for ETH_SS_TEST, in execution order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5359
/* ethtool self_test_count handler: fixed number of self-tests. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5365
/* ethtool self_test handler.  Offline tests (register, memory,
 * loopback) take the NIC down and reset the chip; online tests
 * (nvram, interrupt, link) always run.  buf[i] is nonzero when test i
 * fails, matching bnx2_tests_str_arr order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result itself is the reported value. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or leave reset if down). */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5421
5422static void
5423bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5424{
5425 switch (stringset) {
5426 case ETH_SS_STATS:
5427 memcpy(buf, bnx2_stats_str_arr,
5428 sizeof(bnx2_stats_str_arr));
5429 break;
5430 case ETH_SS_TEST:
5431 memcpy(buf, bnx2_tests_str_arr,
5432 sizeof(bnx2_tests_str_arr));
5433 break;
5434 }
5435}
5436
/* ethtool get_stats_count handler: fixed number of ethtool -S stats. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5442
/* ethtool get_ethtool_stats handler: copy counters out of the raw
 * hardware stats block into buf[], widening each to u64.  A per-chip
 * length table selects 8-byte, 4-byte, or skipped (errata) counters.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeroes. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions have counter errata (see the length
	 * table definitions above).
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: _hi word at the offset, _lo follows */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5483
/* ethtool phys_id handler: blink the port LEDs for `data` seconds
 * (default 2) to identify the adapter, then restore the original LED
 * configuration.  Interruptible by a pending signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Save LED mode and take manual control of the LEDs. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two half-second phases per second: all-off, then all-on. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Release the override and restore the saved LED mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5517
/* ethtool operations table registered on the net device; handlers
 * above are wired in here, generic ones come from the ethtool core.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5553
5554/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG); called
 * with rtnl_lock held.  PHY register accesses are serialized with
 * phy_lock.  Register writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5595
5596/* Called with rtnl_lock */
5597static int
5598bnx2_change_mac_addr(struct net_device *dev, void *p)
5599{
5600 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005601 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005602
Michael Chan73eef4c2005-08-25 15:39:15 -07005603 if (!is_valid_ether_addr(addr->sa_data))
5604 return -EINVAL;
5605
Michael Chanb6016b72005-05-26 13:03:09 -07005606 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5607 if (netif_running(dev))
5608 bnx2_set_mac_addr(bp);
5609
5610 return 0;
5611}
5612
5613/* Called with rtnl_lock */
5614static int
5615bnx2_change_mtu(struct net_device *dev, int new_mtu)
5616{
Michael Chan972ec0d2006-01-23 16:12:43 -08005617 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005618
5619 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5620 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5621 return -EINVAL;
5622
5623 dev->mtu = new_mtu;
5624 if (netif_running(dev)) {
5625 bnx2_netif_stop(bp);
5626
5627 bnx2_init_nic(bp);
5628
5629 bnx2_netif_start(bp);
5630 }
5631 return 0;
5632}
5633
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ masked
 * so netconsole and similar users can poll with interrupts disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5645
/* Probe-time media detection for 5709 dual-media parts: decide from
 * the bond id and strap bits in BNX2_MISC_DUAL_MEDIA_CTRL whether this
 * PCI function uses a SerDes (fibre) PHY, and set PHY_SERDES_FLAG
 * accordingly.  Leaves the flag clear for copper.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id alone decides for single-media parts: C = copper,
	 * S = SerDes.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media: read the strap, preferring a software override
	 * over the hardware strap pins.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap value -> media mapping differs per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
5683
Michael Chanb6016b72005-05-26 13:03:09 -07005684static int __devinit
5685bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5686{
5687 struct bnx2 *bp;
5688 unsigned long mem_len;
5689 int rc;
5690 u32 reg;
5691
5692 SET_MODULE_OWNER(dev);
5693 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005694 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005695
5696 bp->flags = 0;
5697 bp->phy_flags = 0;
5698
5699 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5700 rc = pci_enable_device(pdev);
5701 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005702 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005703 goto err_out;
5704 }
5705
5706 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005707 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005708 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005709 rc = -ENODEV;
5710 goto err_out_disable;
5711 }
5712
5713 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5714 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005715 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005716 goto err_out_disable;
5717 }
5718
5719 pci_set_master(pdev);
5720
5721 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5722 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005723 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005724 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005725 rc = -EIO;
5726 goto err_out_release;
5727 }
5728
Michael Chanb6016b72005-05-26 13:03:09 -07005729 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5730 bp->flags |= USING_DAC_FLAG;
5731 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005732 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005733 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005734 rc = -EIO;
5735 goto err_out_release;
5736 }
5737 }
5738 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005739 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005740 rc = -EIO;
5741 goto err_out_release;
5742 }
5743
5744 bp->dev = dev;
5745 bp->pdev = pdev;
5746
5747 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005748 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005749
5750 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005751 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005752 dev->mem_end = dev->mem_start + mem_len;
5753 dev->irq = pdev->irq;
5754
5755 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5756
5757 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005758 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005759 rc = -ENOMEM;
5760 goto err_out_release;
5761 }
5762
5763 /* Configure byte swap and enable write to the reg_window registers.
5764 * Rely on CPU to do target byte swapping on big endian systems
5765 * The chip's target access swapping will not swap all accesses
5766 */
5767 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5768 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5769 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5770
Pavel Machek829ca9a2005-09-03 15:56:56 -07005771 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005772
5773 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5774
Michael Chan59b47d82006-11-19 14:10:45 -08005775 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5776 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5777 if (bp->pcix_cap == 0) {
5778 dev_err(&pdev->dev,
5779 "Cannot find PCIX capability, aborting.\n");
5780 rc = -EIO;
5781 goto err_out_unmap;
5782 }
5783 }
5784
Michael Chanb6016b72005-05-26 13:03:09 -07005785 /* Get bus information. */
5786 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5787 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5788 u32 clkreg;
5789
5790 bp->flags |= PCIX_FLAG;
5791
5792 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005793
Michael Chanb6016b72005-05-26 13:03:09 -07005794 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5795 switch (clkreg) {
5796 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5797 bp->bus_speed_mhz = 133;
5798 break;
5799
5800 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5801 bp->bus_speed_mhz = 100;
5802 break;
5803
5804 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5805 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5806 bp->bus_speed_mhz = 66;
5807 break;
5808
5809 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5810 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5811 bp->bus_speed_mhz = 50;
5812 break;
5813
5814 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5815 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5816 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5817 bp->bus_speed_mhz = 33;
5818 break;
5819 }
5820 }
5821 else {
5822 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5823 bp->bus_speed_mhz = 66;
5824 else
5825 bp->bus_speed_mhz = 33;
5826 }
5827
5828 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5829 bp->flags |= PCI_32BIT_FLAG;
5830
5831 /* 5706A0 may falsely detect SERR and PERR. */
5832 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5833 reg = REG_RD(bp, PCI_COMMAND);
5834 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5835 REG_WR(bp, PCI_COMMAND, reg);
5836 }
5837 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5838 !(bp->flags & PCIX_FLAG)) {
5839
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005840 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005841 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005842 goto err_out_unmap;
5843 }
5844
5845 bnx2_init_nvram(bp);
5846
Michael Chane3648b32005-11-04 08:51:21 -08005847 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5848
5849 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08005850 BNX2_SHM_HDR_SIGNATURE_SIG) {
5851 u32 off = PCI_FUNC(pdev->devfn) << 2;
5852
5853 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5854 } else
Michael Chane3648b32005-11-04 08:51:21 -08005855 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5856
Michael Chanb6016b72005-05-26 13:03:09 -07005857 /* Get the permanent MAC address. First we need to make sure the
5858 * firmware is actually running.
5859 */
Michael Chane3648b32005-11-04 08:51:21 -08005860 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005861
5862 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5863 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005864 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005865 rc = -ENODEV;
5866 goto err_out_unmap;
5867 }
5868
Michael Chane3648b32005-11-04 08:51:21 -08005869 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005870
Michael Chane3648b32005-11-04 08:51:21 -08005871 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005872 bp->mac_addr[0] = (u8) (reg >> 8);
5873 bp->mac_addr[1] = (u8) reg;
5874
Michael Chane3648b32005-11-04 08:51:21 -08005875 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005876 bp->mac_addr[2] = (u8) (reg >> 24);
5877 bp->mac_addr[3] = (u8) (reg >> 16);
5878 bp->mac_addr[4] = (u8) (reg >> 8);
5879 bp->mac_addr[5] = (u8) reg;
5880
5881 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005882 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005883
5884 bp->rx_csum = 1;
5885
5886 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5887
5888 bp->tx_quick_cons_trip_int = 20;
5889 bp->tx_quick_cons_trip = 20;
5890 bp->tx_ticks_int = 80;
5891 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005892
Michael Chanb6016b72005-05-26 13:03:09 -07005893 bp->rx_quick_cons_trip_int = 6;
5894 bp->rx_quick_cons_trip = 6;
5895 bp->rx_ticks_int = 18;
5896 bp->rx_ticks = 18;
5897
5898 bp->stats_ticks = 1000000 & 0xffff00;
5899
5900 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005901 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005902
Michael Chan5b0c76a2005-11-04 08:45:49 -08005903 bp->phy_addr = 1;
5904
Michael Chanb6016b72005-05-26 13:03:09 -07005905 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08005906 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5907 bnx2_get_5709_media(bp);
5908 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005909 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005910
5911 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005912 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005913 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005914 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005915 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005916 BNX2_SHARED_HW_CFG_CONFIG);
5917 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5918 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5919 }
Michael Chan261dd5c2007-01-08 19:55:46 -08005920 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5921 CHIP_NUM(bp) == CHIP_NUM_5708)
5922 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08005923 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5924 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005925
Michael Chan16088272006-06-12 22:16:43 -07005926 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5927 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5928 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005929 bp->flags |= NO_WOL_FLAG;
5930
Michael Chanb6016b72005-05-26 13:03:09 -07005931 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5932 bp->tx_quick_cons_trip_int =
5933 bp->tx_quick_cons_trip;
5934 bp->tx_ticks_int = bp->tx_ticks;
5935 bp->rx_quick_cons_trip_int =
5936 bp->rx_quick_cons_trip;
5937 bp->rx_ticks_int = bp->rx_ticks;
5938 bp->comp_prod_trip_int = bp->comp_prod_trip;
5939 bp->com_ticks_int = bp->com_ticks;
5940 bp->cmd_ticks_int = bp->cmd_ticks;
5941 }
5942
Michael Chanf9317a42006-09-29 17:06:23 -07005943 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5944 *
5945 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5946 * with byte enables disabled on the unused 32-bit word. This is legal
5947 * but causes problems on the AMD 8132 which will eventually stop
5948 * responding after a while.
5949 *
5950 * AMD believes this incompatibility is unique to the 5706, and
5951 * prefers to locally disable MSI rather than globally disabling it
5952 * using pci_msi_quirk.
5953 */
5954 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5955 struct pci_dev *amd_8132 = NULL;
5956
5957 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5958 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5959 amd_8132))) {
5960 u8 rev;
5961
5962 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5963 if (rev >= 0x10 && rev <= 0x13) {
5964 disable_msi = 1;
5965 pci_dev_put(amd_8132);
5966 break;
5967 }
5968 }
5969 }
5970
Michael Chanb6016b72005-05-26 13:03:09 -07005971 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5972 bp->req_line_speed = 0;
5973 if (bp->phy_flags & PHY_SERDES_FLAG) {
5974 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005975
Michael Chane3648b32005-11-04 08:51:21 -08005976 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005977 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5978 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5979 bp->autoneg = 0;
5980 bp->req_line_speed = bp->line_speed = SPEED_1000;
5981 bp->req_duplex = DUPLEX_FULL;
5982 }
Michael Chanb6016b72005-05-26 13:03:09 -07005983 }
5984 else {
5985 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5986 }
5987
5988 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5989
Michael Chancd339a02005-08-25 15:35:24 -07005990 init_timer(&bp->timer);
5991 bp->timer.expires = RUN_AT(bp->timer_interval);
5992 bp->timer.data = (unsigned long) bp;
5993 bp->timer.function = bnx2_timer;
5994
Michael Chanb6016b72005-05-26 13:03:09 -07005995 return 0;
5996
5997err_out_unmap:
5998 if (bp->regview) {
5999 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006000 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006001 }
6002
6003err_out_release:
6004 pci_release_regions(pdev);
6005
6006err_out_disable:
6007 pci_disable_device(pdev);
6008 pci_set_drvdata(pdev, NULL);
6009
6010err_out:
6011 return rc;
6012}
6013
6014static int __devinit
6015bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6016{
6017 static int version_printed = 0;
6018 struct net_device *dev = NULL;
6019 struct bnx2 *bp;
6020 int rc, i;
6021
6022 if (version_printed++ == 0)
6023 printk(KERN_INFO "%s", version);
6024
6025 /* dev zeroed in init_etherdev */
6026 dev = alloc_etherdev(sizeof(*bp));
6027
6028 if (!dev)
6029 return -ENOMEM;
6030
6031 rc = bnx2_init_board(pdev, dev);
6032 if (rc < 0) {
6033 free_netdev(dev);
6034 return rc;
6035 }
6036
6037 dev->open = bnx2_open;
6038 dev->hard_start_xmit = bnx2_start_xmit;
6039 dev->stop = bnx2_close;
6040 dev->get_stats = bnx2_get_stats;
6041 dev->set_multicast_list = bnx2_set_rx_mode;
6042 dev->do_ioctl = bnx2_ioctl;
6043 dev->set_mac_address = bnx2_change_mac_addr;
6044 dev->change_mtu = bnx2_change_mtu;
6045 dev->tx_timeout = bnx2_tx_timeout;
6046 dev->watchdog_timeo = TX_TIMEOUT;
6047#ifdef BCM_VLAN
6048 dev->vlan_rx_register = bnx2_vlan_rx_register;
6049 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6050#endif
6051 dev->poll = bnx2_poll;
6052 dev->ethtool_ops = &bnx2_ethtool_ops;
6053 dev->weight = 64;
6054
Michael Chan972ec0d2006-01-23 16:12:43 -08006055 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006056
6057#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6058 dev->poll_controller = poll_bnx2;
6059#endif
6060
6061 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006062 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006063 if (bp->regview)
6064 iounmap(bp->regview);
6065 pci_release_regions(pdev);
6066 pci_disable_device(pdev);
6067 pci_set_drvdata(pdev, NULL);
6068 free_netdev(dev);
6069 return rc;
6070 }
6071
6072 pci_set_drvdata(pdev, dev);
6073
6074 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07006075 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07006076 bp->name = board_info[ent->driver_data].name,
6077 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6078 "IRQ %d, ",
6079 dev->name,
6080 bp->name,
6081 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6082 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6083 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6084 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6085 bp->bus_speed_mhz,
6086 dev->base_addr,
6087 bp->pdev->irq);
6088
6089 printk("node addr ");
6090 for (i = 0; i < 6; i++)
6091 printk("%2.2x", dev->dev_addr[i]);
6092 printk("\n");
6093
6094 dev->features |= NETIF_F_SG;
6095 if (bp->flags & USING_DAC_FLAG)
6096 dev->features |= NETIF_F_HIGHDMA;
6097 dev->features |= NETIF_F_IP_CSUM;
6098#ifdef BCM_VLAN
6099 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6100#endif
Michael Chanb11d6212006-06-29 12:31:21 -07006101 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006102
6103 netif_carrier_off(bp->dev);
6104
6105 return 0;
6106}
6107
/* PCI remove entry point: tear down in the reverse order of probe.
 * The ordering matters: pending work must finish before the netdev is
 * unregistered, and the register mapping must go before free_netdev().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any queued bnx2_reset_task to complete. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6126
6127static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006128bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006129{
6130 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006131 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006132 u32 reset_code;
6133
6134 if (!netif_running(dev))
6135 return 0;
6136
Michael Chan1d60290f2006-03-20 17:50:08 -08006137 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006138 bnx2_netif_stop(bp);
6139 netif_device_detach(dev);
6140 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006141 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006142 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006143 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006144 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6145 else
6146 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6147 bnx2_reset_chip(bp, reset_code);
6148 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006149 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006150 return 0;
6151}
6152
6153static int
6154bnx2_resume(struct pci_dev *pdev)
6155{
6156 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006157 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006158
6159 if (!netif_running(dev))
6160 return 0;
6161
Pavel Machek829ca9a2005-09-03 15:56:56 -07006162 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006163 netif_device_attach(dev);
6164 bnx2_init_nic(bp);
6165 bnx2_netif_start(bp);
6166 return 0;
6167}
6168
/* PCI driver operations table tying the probe/remove and PM entry points
 * to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6177
/* Module load: register the driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6182
/* Module unload: unregister the driver (triggers remove for bound devs). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6187
6188module_init(bnx2_init);
6189module_exit(bnx2_cleanup);
6190
6191
6192