blob: 92897efbc26341817c41ff00bf8806e826d4f064 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080056#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070057
58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": "
Michael Chanf9317a42006-09-29 17:06:23 -070060#define DRV_MODULE_VERSION "1.4.45"
61#define DRV_MODULE_RELDATE "September 29, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070062
63#define RUN_AT(x) (jiffies + (x))
64
65/* Time in jiffies before concluding the transmitter is hung. */
66#define TX_TIMEOUT (5*HZ)
67
Randy Dunlape19360f2006-04-10 23:22:06 -070068static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070069 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080072MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070073MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
75
76static int disable_msi = 0;
77
78module_param(disable_msi, int, 0);
79MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board index; stored in bnx2_pci_tbl[].driver_data and used to look up
 * the human-readable name in board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
90
/* Board name strings, indexed by board_t, above.  Keep this array and
 * the board_t enum in the same order.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
103
/* PCI IDs claimed by this driver.  The HP subsystem-specific entries are
 * listed before the PCI_ANY_ID wildcard entries for the same device ID so
 * the NC370 variants match first; driver_data carries the board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
121
/* Known NVRAM devices.  Each entry's initializers follow the field order
 * of struct flash_spec in bnx2.h (strap/config values, buffered flag,
 * page geometry, byte-address mask, total size, name) — see bnx2.h for
 * the exact field meanings.  The hex values are raw controller NVRAM
 * configuration words taken from the hardware documentation.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
208
209MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
210
/* Return the number of free tx descriptors.  Reads tx_prod/tx_cons
 * locklessly; smp_mb() orders the index reads against prior ring updates
 * (pairs with the barrier on the consumer side — NOTE(review): confirm
 * the pairing site, it is outside this chunk).
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	/* The raw ring-index difference can exceed MAX_TX_DESC_CNT; re-fold
	 * it into range (assumes the ring skips one index per wrap — see
	 * TX_RING_IDX/MAX_TX_DESC_CNT in bnx2.h).
	 */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
221
/* Indirect register read: latch @offset into the PCI config window
 * address register, then read the value back through the window.
 * NOTE(review): no locking here — callers presumably serialize access
 * to the shared window; confirm against the callers.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
228
/* Indirect register write: latch @offset into the PCI config window
 * address register, then write @val through the window.  Counterpart of
 * bnx2_reg_rd_ind(); same serialization caveat applies.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
235
236static void
237bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
238{
239 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800240 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
241 int i;
242
243 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
244 REG_WR(bp, BNX2_CTX_CTX_CTRL,
245 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
246 for (i = 0; i < 5; i++) {
247 u32 val;
248 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
249 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
250 break;
251 udelay(5);
252 }
253 } else {
254 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
255 REG_WR(bp, BNX2_CTX_DATA, val);
256 }
Michael Chanb6016b72005-05-26 13:03:09 -0700257}
258
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling of the PHY is enabled, it is turned off for
 * the duration of the transaction and restored afterwards (40us settle
 * delay each way).  Completion is polled for up to 50 x 10us.
 *
 * Returns 0 on success, or -EBUSY if the transaction never completed
 * (in which case *@val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-poll so it cannot collide with our manual
		 * MDIO access.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush */

		udelay(40);
	}

	/* Launch the read: PHY address, register number, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Transaction done; re-read to pick up the data
			 * bits and mask off everything else.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
315
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction, completion is polled for up to 50 x 10us, and -EBUSY is
 * returned if the BUSY bit never clears.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-poll so it cannot collide with our manual
		 * MDIO access.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush */

		udelay(40);
	}

	/* Launch the write: PHY address, register number, data, WRITE
	 * command.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
364
/* Mask chip interrupts via the PCICFG interrupt-ack command register.
 * The trailing read flushes the posted write before returning.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
372
/* Unmask chip interrupts and acknowledge up to the last seen status
 * index.  Two-step sequence: first ack the index with interrupts still
 * masked, then unmask.  The final HC_COMMAND write requests an immediate
 * coalescing pass (COAL_NOW), presumably so events that arrived while
 * masked generate a fresh interrupt — confirm against the HC spec.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
385
/* Disable chip interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path can detect the disabled
 * state; bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
393
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
404
/* Undo one bnx2_netif_stop()/bnx2_disable_int_sync().  Only the call
 * that drops intr_sem to zero actually re-enables the tx queue, polling,
 * and interrupts, so stop/start calls may nest.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
416
/* Release all DMA memory: 5709 context pages, the combined
 * status/statistics block, the tx ring, and all rx rings.  Every pointer
 * is checked and NULLed, so this is safe on a partially allocated bp and
 * serves as the error path of bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		/* stats_blk lives inside the same allocation (see
		 * bnx2_alloc_mem), so it is only cleared, not freed.
		 */
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
455
/* Allocate all driver memory: tx buffer/descriptor rings, rx
 * buffer/descriptor rings (rx_max_ring pages), the combined
 * status+statistics DMA block, and on the 5709 the context pages.
 *
 * Returns 0 on success or -ENOMEM after unwinding every partial
 * allocation via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
					        sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx buffer ring can be large (rx_max_ring pages worth of
	 * sw_bd), so it is vmalloc'ed rather than kmalloc'ed.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk points into the tail of the status allocation;
	 * bnx2_free_mem() must not free it separately.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host-resident context memory, split
		 * into BCM_PAGE_SIZE chunks (minimum one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
526
527static void
Michael Chane3648b32005-11-04 08:51:21 -0800528bnx2_report_fw_link(struct bnx2 *bp)
529{
530 u32 fw_link_status = 0;
531
532 if (bp->link_up) {
533 u32 bmsr;
534
535 switch (bp->line_speed) {
536 case SPEED_10:
537 if (bp->duplex == DUPLEX_HALF)
538 fw_link_status = BNX2_LINK_STATUS_10HALF;
539 else
540 fw_link_status = BNX2_LINK_STATUS_10FULL;
541 break;
542 case SPEED_100:
543 if (bp->duplex == DUPLEX_HALF)
544 fw_link_status = BNX2_LINK_STATUS_100HALF;
545 else
546 fw_link_status = BNX2_LINK_STATUS_100FULL;
547 break;
548 case SPEED_1000:
549 if (bp->duplex == DUPLEX_HALF)
550 fw_link_status = BNX2_LINK_STATUS_1000HALF;
551 else
552 fw_link_status = BNX2_LINK_STATUS_1000FULL;
553 break;
554 case SPEED_2500:
555 if (bp->duplex == DUPLEX_HALF)
556 fw_link_status = BNX2_LINK_STATUS_2500HALF;
557 else
558 fw_link_status = BNX2_LINK_STATUS_2500FULL;
559 break;
560 }
561
562 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
563
564 if (bp->autoneg) {
565 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
566
567 bnx2_read_phy(bp, MII_BMSR, &bmsr);
568 bnx2_read_phy(bp, MII_BMSR, &bmsr);
569
570 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
571 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
572 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
573 else
574 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
575 }
576 }
577 else
578 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
579
580 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
581}
582
/* Log the link state to the kernel log, update the carrier flag on the
 * netdevice, and forward the state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
617
/* Resolve the negotiated flow-control mode into bp->flow_ctrl.
 *
 * If speed or flow control was forced (not both autonegotiated), the
 * requested mode is applied directly (full duplex only).  On a 5708
 * SerDes link the hardware-resolved pause bits are read from the PHY.
 * Otherwise the local/remote pause advertisements are combined per the
 * resolution table in IEEE 802.3ab-1999 Table 28B-3, translating
 * 1000Base-X pause bits into the common form first for SerDes.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* The 5708S PHY resolves pause itself; just read it. */
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000Base-X pause bits onto the common PAUSE_CAP/
		 * PAUSE_ASYM encoding so one resolution path serves both.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
693
694static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800695bnx2_5708s_linkup(struct bnx2 *bp)
696{
697 u32 val;
698
699 bp->link_up = 1;
700 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
701 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
702 case BCM5708S_1000X_STAT1_SPEED_10:
703 bp->line_speed = SPEED_10;
704 break;
705 case BCM5708S_1000X_STAT1_SPEED_100:
706 bp->line_speed = SPEED_100;
707 break;
708 case BCM5708S_1000X_STAT1_SPEED_1G:
709 bp->line_speed = SPEED_1000;
710 break;
711 case BCM5708S_1000X_STAT1_SPEED_2G5:
712 bp->line_speed = SPEED_2500;
713 break;
714 }
715 if (val & BCM5708S_1000X_STAT1_FD)
716 bp->duplex = DUPLEX_FULL;
717 else
718 bp->duplex = DUPLEX_HALF;
719
720 return 0;
721}
722
/* Link is up on a 5706 SerDes PHY: speed is always 1000 Mbps; duplex
 * comes from BMCR in forced mode, or from the common local/remote
 * 1000Base-X advertisement bits when autoneg is enabled.  Returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
759
760static int
761bnx2_copper_linkup(struct bnx2 *bp)
762{
763 u32 bmcr;
764
765 bnx2_read_phy(bp, MII_BMCR, &bmcr);
766 if (bmcr & BMCR_ANENABLE) {
767 u32 local_adv, remote_adv, common;
768
769 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
770 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
771
772 common = local_adv & (remote_adv >> 2);
773 if (common & ADVERTISE_1000FULL) {
774 bp->line_speed = SPEED_1000;
775 bp->duplex = DUPLEX_FULL;
776 }
777 else if (common & ADVERTISE_1000HALF) {
778 bp->line_speed = SPEED_1000;
779 bp->duplex = DUPLEX_HALF;
780 }
781 else {
782 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
783 bnx2_read_phy(bp, MII_LPA, &remote_adv);
784
785 common = local_adv & remote_adv;
786 if (common & ADVERTISE_100FULL) {
787 bp->line_speed = SPEED_100;
788 bp->duplex = DUPLEX_FULL;
789 }
790 else if (common & ADVERTISE_100HALF) {
791 bp->line_speed = SPEED_100;
792 bp->duplex = DUPLEX_HALF;
793 }
794 else if (common & ADVERTISE_10FULL) {
795 bp->line_speed = SPEED_10;
796 bp->duplex = DUPLEX_FULL;
797 }
798 else if (common & ADVERTISE_10HALF) {
799 bp->line_speed = SPEED_10;
800 bp->duplex = DUPLEX_HALF;
801 }
802 else {
803 bp->line_speed = 0;
804 bp->link_up = 0;
805 }
806 }
807 }
808 else {
809 if (bmcr & BMCR_SPEED100) {
810 bp->line_speed = SPEED_100;
811 }
812 else {
813 bp->line_speed = SPEED_10;
814 }
815 if (bmcr & BMCR_FULLDPLX) {
816 bp->duplex = DUPLEX_FULL;
817 }
818 else {
819 bp->duplex = DUPLEX_HALF;
820 }
821 }
822
823 return 0;
824}
825
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 * Finishes by acking the EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default tx lengths; the 0x26ff value widens the slot time for
	 * 1000 Mbps half duplex (magic values from hardware docs —
	 * NOTE(review): confirm against the EMAC_TX_LENGTHS spec).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				/* 5706 has no dedicated 10M port mode. */
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
892
/* Re-evaluate link state and reprogram the MAC accordingly.
 *
 * Reads BMSR (twice, since it is latched) to determine link, decodes
 * speed/duplex via the PHY-specific linkup helper, resolves flow
 * control, logs any link transition, and calls bnx2_set_mac_link().
 * In loopback modes the link is simply forced up.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		/* On 5706 SerDes, trust the EMAC link indication over the
		 * PHY's BMSR bit.
		 */
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link dropped: clear any forced 2.5G mode and
			 * make sure autoneg is re-enabled.
			 */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
959
/* Soft-reset the PHY via BMCR and poll (up to 100 x 10us) for the
 * self-clearing RESET bit to drop.  Returns 0 on success, -EBUSY if the
 * PHY never comes out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);	/* settle time after reset clears */
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
983
984static u32
985bnx2_phy_get_pause_adv(struct bnx2 *bp)
986{
987 u32 adv = 0;
988
989 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
990 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
991
992 if (bp->phy_flags & PHY_SERDES_FLAG) {
993 adv = ADVERTISE_1000XPAUSE;
994 }
995 else {
996 adv = ADVERTISE_PAUSE_CAP;
997 }
998 }
999 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPSE_ASYM;
1002 }
1003 else {
1004 adv = ADVERTISE_PAUSE_ASYM;
1005 }
1006 }
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1010 }
1011 else {
1012 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1013 }
1014 }
1015 return adv;
1016}
1017
1018static int
1019bnx2_setup_serdes_phy(struct bnx2 *bp)
1020{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001021 u32 adv, bmcr, up1;
Michael Chanb6016b72005-05-26 13:03:09 -07001022 u32 new_adv = 0;
1023
1024 if (!(bp->autoneg & AUTONEG_SPEED)) {
1025 u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001026 int force_link_down = 0;
1027
Michael Chan80be4432006-11-19 14:07:28 -08001028 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1029 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1030
1031 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1032 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1033 new_bmcr |= BMCR_SPEED1000;
1034 if (bp->req_line_speed == SPEED_2500) {
1035 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1036 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1037 if (!(up1 & BCM5708S_UP1_2G5)) {
1038 up1 |= BCM5708S_UP1_2G5;
1039 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1040 force_link_down = 1;
1041 }
1042 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001043 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1044 if (up1 & BCM5708S_UP1_2G5) {
1045 up1 &= ~BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1047 force_link_down = 1;
1048 }
1049 }
1050
Michael Chanb6016b72005-05-26 13:03:09 -07001051 if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001052 adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001053 new_bmcr |= BMCR_FULLDPLX;
1054 }
1055 else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001056 adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001057 new_bmcr &= ~BMCR_FULLDPLX;
1058 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001059 if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001060 /* Force a link down visible on the other side */
1061 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001062 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1063 ~(ADVERTISE_1000XFULL |
1064 ADVERTISE_1000XHALF));
Michael Chanb6016b72005-05-26 13:03:09 -07001065 bnx2_write_phy(bp, MII_BMCR, bmcr |
1066 BMCR_ANRESTART | BMCR_ANENABLE);
1067
1068 bp->link_up = 0;
1069 netif_carrier_off(bp->dev);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001070 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
Michael Chan80be4432006-11-19 14:07:28 -08001071 bnx2_report_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001072 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001073 bnx2_write_phy(bp, MII_ADVERTISE, adv);
Michael Chanb6016b72005-05-26 13:03:09 -07001074 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1075 }
1076 return 0;
1077 }
1078
Michael Chan5b0c76a2005-11-04 08:45:49 -08001079 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1080 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1081 up1 |= BCM5708S_UP1_2G5;
1082 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1083 }
1084
Michael Chanb6016b72005-05-26 13:03:09 -07001085 if (bp->advertising & ADVERTISED_1000baseT_Full)
1086 new_adv |= ADVERTISE_1000XFULL;
1087
1088 new_adv |= bnx2_phy_get_pause_adv(bp);
1089
1090 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1091 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1092
1093 bp->serdes_an_pending = 0;
1094 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1095 /* Force a link down visible on the other side */
1096 if (bp->link_up) {
Michael Chanb6016b72005-05-26 13:03:09 -07001097 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
Michael Chan80be4432006-11-19 14:07:28 -08001098 spin_unlock_bh(&bp->phy_lock);
1099 msleep(20);
1100 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001101 }
1102
1103 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1104 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1105 BMCR_ANENABLE);
Michael Chanf8dd0642006-11-19 14:08:29 -08001106 /* Speed up link-up time when the link partner
1107 * does not autonegotiate which is very common
1108 * in blade servers. Some blade servers use
1109 * IPMI for kerboard input and it's important
1110 * to minimize link disruptions. Autoneg. involves
1111 * exchanging base pages plus 3 next pages and
1112 * normally completes in about 120 msec.
1113 */
1114 bp->current_interval = SERDES_AN_TIMEOUT;
1115 bp->serdes_an_pending = 1;
1116 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07001117 }
1118
1119 return 0;
1120}
1121
/* Advertisement masks covering every speed each media type supports. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register masks for the copper speed bits. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1134
1135static int
1136bnx2_setup_copper_phy(struct bnx2 *bp)
1137{
1138 u32 bmcr;
1139 u32 new_bmcr;
1140
1141 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1142
1143 if (bp->autoneg & AUTONEG_SPEED) {
1144 u32 adv_reg, adv1000_reg;
1145 u32 new_adv_reg = 0;
1146 u32 new_adv1000_reg = 0;
1147
1148 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1149 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1150 ADVERTISE_PAUSE_ASYM);
1151
1152 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1153 adv1000_reg &= PHY_ALL_1000_SPEED;
1154
1155 if (bp->advertising & ADVERTISED_10baseT_Half)
1156 new_adv_reg |= ADVERTISE_10HALF;
1157 if (bp->advertising & ADVERTISED_10baseT_Full)
1158 new_adv_reg |= ADVERTISE_10FULL;
1159 if (bp->advertising & ADVERTISED_100baseT_Half)
1160 new_adv_reg |= ADVERTISE_100HALF;
1161 if (bp->advertising & ADVERTISED_100baseT_Full)
1162 new_adv_reg |= ADVERTISE_100FULL;
1163 if (bp->advertising & ADVERTISED_1000baseT_Full)
1164 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001165
Michael Chanb6016b72005-05-26 13:03:09 -07001166 new_adv_reg |= ADVERTISE_CSMA;
1167
1168 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1169
1170 if ((adv1000_reg != new_adv1000_reg) ||
1171 (adv_reg != new_adv_reg) ||
1172 ((bmcr & BMCR_ANENABLE) == 0)) {
1173
1174 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1175 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1176 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1177 BMCR_ANENABLE);
1178 }
1179 else if (bp->link_up) {
1180 /* Flow ctrl may have changed from auto to forced */
1181 /* or vice-versa. */
1182
1183 bnx2_resolve_flow_ctrl(bp);
1184 bnx2_set_mac_link(bp);
1185 }
1186 return 0;
1187 }
1188
1189 new_bmcr = 0;
1190 if (bp->req_line_speed == SPEED_100) {
1191 new_bmcr |= BMCR_SPEED100;
1192 }
1193 if (bp->req_duplex == DUPLEX_FULL) {
1194 new_bmcr |= BMCR_FULLDPLX;
1195 }
1196 if (new_bmcr != bmcr) {
1197 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001198
1199 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1200 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001201
Michael Chanb6016b72005-05-26 13:03:09 -07001202 if (bmsr & BMSR_LSTATUS) {
1203 /* Force link down */
1204 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001205 spin_unlock_bh(&bp->phy_lock);
1206 msleep(50);
1207 spin_lock_bh(&bp->phy_lock);
1208
1209 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1210 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001211 }
1212
1213 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1214
1215 /* Normally, the new speed is setup after the link has
1216 * gone down and up again. In some cases, link will not go
1217 * down so we need to set up the new speed here.
1218 */
1219 if (bmsr & BMSR_LSTATUS) {
1220 bp->line_speed = bp->req_line_speed;
1221 bp->duplex = bp->req_duplex;
1222 bnx2_resolve_flow_ctrl(bp);
1223 bnx2_set_mac_link(bp);
1224 }
1225 }
1226 return 0;
1227}
1228
1229static int
1230bnx2_setup_phy(struct bnx2 *bp)
1231{
1232 if (bp->loopback == MAC_LOOPBACK)
1233 return 0;
1234
1235 if (bp->phy_flags & PHY_SERDES_FLAG) {
1236 return (bnx2_setup_serdes_phy(bp));
1237 }
1238 else {
1239 return (bnx2_setup_copper_phy(bp));
1240 }
1241}
1242
1243static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001244bnx2_init_5708s_phy(struct bnx2 *bp)
1245{
1246 u32 val;
1247
1248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1249 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1250 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1251
1252 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1253 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1254 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1255
1256 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1257 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1258 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1259
1260 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1261 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1262 val |= BCM5708S_UP1_2G5;
1263 bnx2_write_phy(bp, BCM5708S_UP1, val);
1264 }
1265
1266 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001267 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1268 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001269 /* increase tx signal amplitude */
1270 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1271 BCM5708S_BLK_ADDR_TX_MISC);
1272 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1273 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1274 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1275 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1276 }
1277
Michael Chane3648b32005-11-04 08:51:21 -08001278 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001279 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1280
1281 if (val) {
1282 u32 is_backplane;
1283
Michael Chane3648b32005-11-04 08:51:21 -08001284 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001285 BNX2_SHARED_HW_CFG_CONFIG);
1286 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1287 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1288 BCM5708S_BLK_ADDR_TX_MISC);
1289 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1290 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1291 BCM5708S_BLK_ADDR_DIG);
1292 }
1293 }
1294 return 0;
1295}
1296
1297static int
1298bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001299{
1300 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1301
Michael Chan59b47d82006-11-19 14:10:45 -08001302 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1303 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001304
1305 if (bp->dev->mtu > 1500) {
1306 u32 val;
1307
1308 /* Set extended packet length bit */
1309 bnx2_write_phy(bp, 0x18, 0x7);
1310 bnx2_read_phy(bp, 0x18, &val);
1311 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1312
1313 bnx2_write_phy(bp, 0x1c, 0x6c00);
1314 bnx2_read_phy(bp, 0x1c, &val);
1315 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1316 }
1317 else {
1318 u32 val;
1319
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1323
1324 bnx2_write_phy(bp, 0x1c, 0x6c00);
1325 bnx2_read_phy(bp, 0x1c, &val);
1326 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1327 }
1328
1329 return 0;
1330}
1331
1332static int
1333bnx2_init_copper_phy(struct bnx2 *bp)
1334{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001335 u32 val;
1336
Michael Chanb6016b72005-05-26 13:03:09 -07001337 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1338
1339 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1340 bnx2_write_phy(bp, 0x18, 0x0c00);
1341 bnx2_write_phy(bp, 0x17, 0x000a);
1342 bnx2_write_phy(bp, 0x15, 0x310b);
1343 bnx2_write_phy(bp, 0x17, 0x201f);
1344 bnx2_write_phy(bp, 0x15, 0x9506);
1345 bnx2_write_phy(bp, 0x17, 0x401f);
1346 bnx2_write_phy(bp, 0x15, 0x14e2);
1347 bnx2_write_phy(bp, 0x18, 0x0400);
1348 }
1349
1350 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001351 /* Set extended packet length bit */
1352 bnx2_write_phy(bp, 0x18, 0x7);
1353 bnx2_read_phy(bp, 0x18, &val);
1354 bnx2_write_phy(bp, 0x18, val | 0x4000);
1355
1356 bnx2_read_phy(bp, 0x10, &val);
1357 bnx2_write_phy(bp, 0x10, val | 0x1);
1358 }
1359 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001360 bnx2_write_phy(bp, 0x18, 0x7);
1361 bnx2_read_phy(bp, 0x18, &val);
1362 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1363
1364 bnx2_read_phy(bp, 0x10, &val);
1365 bnx2_write_phy(bp, 0x10, val & ~0x1);
1366 }
1367
Michael Chan5b0c76a2005-11-04 08:45:49 -08001368 /* ethernet@wirespeed */
1369 bnx2_write_phy(bp, 0x18, 0x7007);
1370 bnx2_read_phy(bp, 0x18, &val);
1371 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001372 return 0;
1373}
1374
1375
1376static int
1377bnx2_init_phy(struct bnx2 *bp)
1378{
1379 u32 val;
1380 int rc = 0;
1381
1382 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1383 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1384
1385 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1386
1387 bnx2_reset_phy(bp);
1388
1389 bnx2_read_phy(bp, MII_PHYSID1, &val);
1390 bp->phy_id = val << 16;
1391 bnx2_read_phy(bp, MII_PHYSID2, &val);
1392 bp->phy_id |= val & 0xffff;
1393
1394 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001395 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1396 rc = bnx2_init_5706s_phy(bp);
1397 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1398 rc = bnx2_init_5708s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001399 }
1400 else {
1401 rc = bnx2_init_copper_phy(bp);
1402 }
1403
1404 bnx2_setup_phy(bp);
1405
1406 return rc;
1407}
1408
1409static int
1410bnx2_set_mac_loopback(struct bnx2 *bp)
1411{
1412 u32 mac_mode;
1413
1414 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1415 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1416 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1418 bp->link_up = 1;
1419 return 0;
1420}
1421
Michael Chanbc5a0692006-01-23 16:13:22 -08001422static int bnx2_test_link(struct bnx2 *);
1423
1424static int
1425bnx2_set_phy_loopback(struct bnx2 *bp)
1426{
1427 u32 mac_mode;
1428 int rc, i;
1429
1430 spin_lock_bh(&bp->phy_lock);
1431 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1432 BMCR_SPEED1000);
1433 spin_unlock_bh(&bp->phy_lock);
1434 if (rc)
1435 return rc;
1436
1437 for (i = 0; i < 10; i++) {
1438 if (bnx2_test_link(bp) == 0)
1439 break;
Michael Chan80be4432006-11-19 14:07:28 -08001440 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001441 }
1442
1443 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1444 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1445 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001446 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001447
1448 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1449 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1450 bp->link_up = 1;
1451 return 0;
1452}
1453
Michael Chanb6016b72005-05-26 13:03:09 -07001454static int
Michael Chanb090ae22006-01-23 16:07:10 -08001455bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001456{
1457 int i;
1458 u32 val;
1459
Michael Chanb6016b72005-05-26 13:03:09 -07001460 bp->fw_wr_seq++;
1461 msg_data |= bp->fw_wr_seq;
1462
Michael Chane3648b32005-11-04 08:51:21 -08001463 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001464
1465 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001466 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1467 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001468
Michael Chane3648b32005-11-04 08:51:21 -08001469 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001470
1471 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1472 break;
1473 }
Michael Chanb090ae22006-01-23 16:07:10 -08001474 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1475 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001476
1477 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001478 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1479 if (!silent)
1480 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1481 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001482
1483 msg_data &= ~BNX2_DRV_MSG_CODE;
1484 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1485
Michael Chane3648b32005-11-04 08:51:21 -08001486 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001487
Michael Chanb6016b72005-05-26 13:03:09 -07001488 return -EBUSY;
1489 }
1490
Michael Chanb090ae22006-01-23 16:07:10 -08001491 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1492 return -EIO;
1493
Michael Chanb6016b72005-05-26 13:03:09 -07001494 return 0;
1495}
1496
Michael Chan59b47d82006-11-19 14:10:45 -08001497static int
1498bnx2_init_5709_context(struct bnx2 *bp)
1499{
1500 int i, ret = 0;
1501 u32 val;
1502
1503 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1504 val |= (BCM_PAGE_BITS - 8) << 16;
1505 REG_WR(bp, BNX2_CTX_COMMAND, val);
1506 for (i = 0; i < bp->ctx_pages; i++) {
1507 int j;
1508
1509 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1510 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1511 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1512 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1513 (u64) bp->ctx_blk_mapping[i] >> 32);
1514 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1515 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1516 for (j = 0; j < 10; j++) {
1517
1518 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1519 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1520 break;
1521 udelay(5);
1522 }
1523 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1524 ret = -EBUSY;
1525 break;
1526 }
1527 }
1528 return ret;
1529}
1530
Michael Chanb6016b72005-05-26 13:03:09 -07001531static void
1532bnx2_init_context(struct bnx2 *bp)
1533{
1534 u32 vcid;
1535
1536 vcid = 96;
1537 while (vcid) {
1538 u32 vcid_addr, pcid_addr, offset;
1539
1540 vcid--;
1541
1542 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1543 u32 new_vcid;
1544
1545 vcid_addr = GET_PCID_ADDR(vcid);
1546 if (vcid & 0x8) {
1547 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1548 }
1549 else {
1550 new_vcid = vcid;
1551 }
1552 pcid_addr = GET_PCID_ADDR(new_vcid);
1553 }
1554 else {
1555 vcid_addr = GET_CID_ADDR(vcid);
1556 pcid_addr = vcid_addr;
1557 }
1558
1559 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1560 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1561
1562 /* Zero out the context. */
1563 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1564 CTX_WR(bp, 0x00, offset, 0);
1565 }
1566
1567 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1568 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1569 }
1570}
1571
1572static int
1573bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1574{
1575 u16 *good_mbuf;
1576 u32 good_mbuf_cnt;
1577 u32 val;
1578
1579 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1580 if (good_mbuf == NULL) {
1581 printk(KERN_ERR PFX "Failed to allocate memory in "
1582 "bnx2_alloc_bad_rbuf\n");
1583 return -ENOMEM;
1584 }
1585
1586 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1587 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1588
1589 good_mbuf_cnt = 0;
1590
1591 /* Allocate a bunch of mbufs and save the good ones in an array. */
1592 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1593 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1594 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1595
1596 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1597
1598 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1599
1600 /* The addresses with Bit 9 set are bad memory blocks. */
1601 if (!(val & (1 << 9))) {
1602 good_mbuf[good_mbuf_cnt] = (u16) val;
1603 good_mbuf_cnt++;
1604 }
1605
1606 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1607 }
1608
1609 /* Free the good ones back to the mbuf pool thus discarding
1610 * all the bad ones. */
1611 while (good_mbuf_cnt) {
1612 good_mbuf_cnt--;
1613
1614 val = good_mbuf[good_mbuf_cnt];
1615 val = (val << 9) | val | 1;
1616
1617 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1618 }
1619 kfree(good_mbuf);
1620 return 0;
1621}
1622
1623static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001624bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001625{
1626 u32 val;
1627 u8 *mac_addr = bp->dev->dev_addr;
1628
1629 val = (mac_addr[0] << 8) | mac_addr[1];
1630
1631 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1632
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001633 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001634 (mac_addr[4] << 8) | mac_addr[5];
1635
1636 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1637}
1638
1639static inline int
1640bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1641{
1642 struct sk_buff *skb;
1643 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1644 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001645 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001646 unsigned long align;
1647
Michael Chan932f3772006-08-15 01:39:36 -07001648 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001649 if (skb == NULL) {
1650 return -ENOMEM;
1651 }
1652
Michael Chan59b47d82006-11-19 14:10:45 -08001653 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1654 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001655
Michael Chanb6016b72005-05-26 13:03:09 -07001656 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1657 PCI_DMA_FROMDEVICE);
1658
1659 rx_buf->skb = skb;
1660 pci_unmap_addr_set(rx_buf, mapping, mapping);
1661
1662 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1663 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1664
1665 bp->rx_prod_bseq += bp->rx_buf_use_size;
1666
1667 return 0;
1668}
1669
1670static void
1671bnx2_phy_int(struct bnx2 *bp)
1672{
1673 u32 new_link_state, old_link_state;
1674
1675 new_link_state = bp->status_blk->status_attn_bits &
1676 STATUS_ATTN_BITS_LINK_STATE;
1677 old_link_state = bp->status_blk->status_attn_bits_ack &
1678 STATUS_ATTN_BITS_LINK_STATE;
1679 if (new_link_state != old_link_state) {
1680 if (new_link_state) {
1681 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1682 STATUS_ATTN_BITS_LINK_STATE);
1683 }
1684 else {
1685 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1686 STATUS_ATTN_BITS_LINK_STATE);
1687 }
1688 bnx2_set_link(bp);
1689 }
1690}
1691
1692static void
1693bnx2_tx_int(struct bnx2 *bp)
1694{
Michael Chanf4e418f2005-11-04 08:53:48 -08001695 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001696 u16 hw_cons, sw_cons, sw_ring_cons;
1697 int tx_free_bd = 0;
1698
Michael Chanf4e418f2005-11-04 08:53:48 -08001699 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001700 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1701 hw_cons++;
1702 }
1703 sw_cons = bp->tx_cons;
1704
1705 while (sw_cons != hw_cons) {
1706 struct sw_bd *tx_buf;
1707 struct sk_buff *skb;
1708 int i, last;
1709
1710 sw_ring_cons = TX_RING_IDX(sw_cons);
1711
1712 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1713 skb = tx_buf->skb;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001714#ifdef BCM_TSO
Michael Chanb6016b72005-05-26 13:03:09 -07001715 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07001716 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001717 u16 last_idx, last_ring_idx;
1718
1719 last_idx = sw_cons +
1720 skb_shinfo(skb)->nr_frags + 1;
1721 last_ring_idx = sw_ring_cons +
1722 skb_shinfo(skb)->nr_frags + 1;
1723 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1724 last_idx++;
1725 }
1726 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1727 break;
1728 }
1729 }
1730#endif
1731 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1732 skb_headlen(skb), PCI_DMA_TODEVICE);
1733
1734 tx_buf->skb = NULL;
1735 last = skb_shinfo(skb)->nr_frags;
1736
1737 for (i = 0; i < last; i++) {
1738 sw_cons = NEXT_TX_BD(sw_cons);
1739
1740 pci_unmap_page(bp->pdev,
1741 pci_unmap_addr(
1742 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1743 mapping),
1744 skb_shinfo(skb)->frags[i].size,
1745 PCI_DMA_TODEVICE);
1746 }
1747
1748 sw_cons = NEXT_TX_BD(sw_cons);
1749
1750 tx_free_bd += last + 1;
1751
Michael Chan745720e2006-06-29 12:37:41 -07001752 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07001753
Michael Chanf4e418f2005-11-04 08:53:48 -08001754 hw_cons = bp->hw_tx_cons =
1755 sblk->status_tx_quick_consumer_index0;
1756
Michael Chanb6016b72005-05-26 13:03:09 -07001757 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1758 hw_cons++;
1759 }
1760 }
1761
Michael Chane89bbf12005-08-25 15:36:58 -07001762 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07001763 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1764 * before checking for netif_queue_stopped(). Without the
1765 * memory barrier, there is a small possibility that bnx2_start_xmit()
1766 * will miss it and cause the queue to be stopped forever.
1767 */
1768 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07001769
Michael Chan2f8af122006-08-15 01:39:10 -07001770 if (unlikely(netif_queue_stopped(bp->dev)) &&
1771 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1772 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001773 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07001774 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07001775 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07001776 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001777 }
Michael Chanb6016b72005-05-26 13:03:09 -07001778}
1779
1780static inline void
1781bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1782 u16 cons, u16 prod)
1783{
Michael Chan236b6392006-03-20 17:49:02 -08001784 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1785 struct rx_bd *cons_bd, *prod_bd;
1786
1787 cons_rx_buf = &bp->rx_buf_ring[cons];
1788 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07001789
1790 pci_dma_sync_single_for_device(bp->pdev,
1791 pci_unmap_addr(cons_rx_buf, mapping),
1792 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1793
Michael Chan236b6392006-03-20 17:49:02 -08001794 bp->rx_prod_bseq += bp->rx_buf_use_size;
1795
1796 prod_rx_buf->skb = skb;
1797
1798 if (cons == prod)
1799 return;
1800
Michael Chanb6016b72005-05-26 13:03:09 -07001801 pci_unmap_addr_set(prod_rx_buf, mapping,
1802 pci_unmap_addr(cons_rx_buf, mapping));
1803
Michael Chan3fdfcc22006-03-20 17:49:49 -08001804 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1805 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08001806 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1807 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07001808}
1809
1810static int
1811bnx2_rx_int(struct bnx2 *bp, int budget)
1812{
Michael Chanf4e418f2005-11-04 08:53:48 -08001813 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001814 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1815 struct l2_fhdr *rx_hdr;
1816 int rx_pkt = 0;
1817
Michael Chanf4e418f2005-11-04 08:53:48 -08001818 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001819 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1820 hw_cons++;
1821 }
1822 sw_cons = bp->rx_cons;
1823 sw_prod = bp->rx_prod;
1824
1825 /* Memory barrier necessary as speculative reads of the rx
1826 * buffer can be ahead of the index in the status block
1827 */
1828 rmb();
1829 while (sw_cons != hw_cons) {
1830 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08001831 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07001832 struct sw_bd *rx_buf;
1833 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08001834 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07001835
1836 sw_ring_cons = RX_RING_IDX(sw_cons);
1837 sw_ring_prod = RX_RING_IDX(sw_prod);
1838
1839 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1840 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08001841
1842 rx_buf->skb = NULL;
1843
1844 dma_addr = pci_unmap_addr(rx_buf, mapping);
1845
1846 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07001847 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1848
1849 rx_hdr = (struct l2_fhdr *) skb->data;
1850 len = rx_hdr->l2_fhdr_pkt_len - 4;
1851
Michael Chanade2bfe2006-01-23 16:09:51 -08001852 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07001853 (L2_FHDR_ERRORS_BAD_CRC |
1854 L2_FHDR_ERRORS_PHY_DECODE |
1855 L2_FHDR_ERRORS_ALIGNMENT |
1856 L2_FHDR_ERRORS_TOO_SHORT |
1857 L2_FHDR_ERRORS_GIANT_FRAME)) {
1858
1859 goto reuse_rx;
1860 }
1861
1862 /* Since we don't have a jumbo ring, copy small packets
1863 * if mtu > 1500
1864 */
1865 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1866 struct sk_buff *new_skb;
1867
Michael Chan932f3772006-08-15 01:39:36 -07001868 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07001869 if (new_skb == NULL)
1870 goto reuse_rx;
1871
1872 /* aligned copy */
1873 memcpy(new_skb->data,
1874 skb->data + bp->rx_offset - 2,
1875 len + 2);
1876
1877 skb_reserve(new_skb, 2);
1878 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07001879
1880 bnx2_reuse_rx_skb(bp, skb,
1881 sw_ring_cons, sw_ring_prod);
1882
1883 skb = new_skb;
1884 }
1885 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08001886 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07001887 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1888
1889 skb_reserve(skb, bp->rx_offset);
1890 skb_put(skb, len);
1891 }
1892 else {
1893reuse_rx:
1894 bnx2_reuse_rx_skb(bp, skb,
1895 sw_ring_cons, sw_ring_prod);
1896 goto next_rx;
1897 }
1898
1899 skb->protocol = eth_type_trans(skb, bp->dev);
1900
1901 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07001902 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001903
Michael Chan745720e2006-06-29 12:37:41 -07001904 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07001905 goto next_rx;
1906
1907 }
1908
Michael Chanb6016b72005-05-26 13:03:09 -07001909 skb->ip_summed = CHECKSUM_NONE;
1910 if (bp->rx_csum &&
1911 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1912 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1913
Michael Chanade2bfe2006-01-23 16:09:51 -08001914 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1915 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07001916 skb->ip_summed = CHECKSUM_UNNECESSARY;
1917 }
1918
1919#ifdef BCM_VLAN
1920 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1921 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1922 rx_hdr->l2_fhdr_vlan_tag);
1923 }
1924 else
1925#endif
1926 netif_receive_skb(skb);
1927
1928 bp->dev->last_rx = jiffies;
1929 rx_pkt++;
1930
1931next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07001932 sw_cons = NEXT_RX_BD(sw_cons);
1933 sw_prod = NEXT_RX_BD(sw_prod);
1934
1935 if ((rx_pkt == budget))
1936 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08001937
1938 /* Refresh hw_cons to see if there is new work */
1939 if (sw_cons == hw_cons) {
1940 hw_cons = bp->hw_rx_cons =
1941 sblk->status_rx_quick_consumer_index0;
1942 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1943 hw_cons++;
1944 rmb();
1945 }
Michael Chanb6016b72005-05-26 13:03:09 -07001946 }
1947 bp->rx_cons = sw_cons;
1948 bp->rx_prod = sw_prod;
1949
1950 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1951
1952 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1953
1954 mmiowb();
1955
1956 return rx_pkt;
1957
1958}
1959
1960/* MSI ISR - The only difference between this and the INTx ISR
1961 * is that the MSI interrupt is always serviced.
1962 */
1963static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01001964bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07001965{
1966 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08001967 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001968
Michael Chanc921e4c2005-09-08 13:15:32 -07001969 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07001970 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1971 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1972 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1973
1974 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07001975 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1976 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07001977
Michael Chan73eef4c2005-08-25 15:39:15 -07001978 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001979
Michael Chan73eef4c2005-08-25 15:39:15 -07001980 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07001981}
1982
/* INTx interrupt handler.  Must first decide whether the interrupt was
 * really raised by this device since the line may be shared; on a hit it
 * acks/masks the interrupt and hands the work to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further ones; the NAPI poll routine
	 * re-enables them when it finishes (see bnx2_poll()).
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2012
Michael Chanf4e418f2005-11-04 08:53:48 -08002013static inline int
2014bnx2_has_work(struct bnx2 *bp)
2015{
2016 struct status_block *sblk = bp->status_blk;
2017
2018 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2019 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2020 return 1;
2021
2022 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2023 bp->link_up)
2024 return 1;
2025
2026 return 0;
2027}
2028
/* NAPI poll handler: services link-state attention, TX completions and
 * up to *budget RX packets, then re-enables interrupts once no work
 * remains.  Returns 1 to stay on the poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the link attention bit and its ack copy
	 * means a link change has not been handled yet.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to post the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device quota allows. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index we acted on; rmb() orders it against
	 * the bnx2_has_work() re-check below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: one write re-enables the interrupt. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first write updates the index with the line still
		 * masked, the second write unmasks it.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2090
Herbert Xu932ff272006-06-09 12:20:56 -07002091/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002092 * from set_multicast.
2093 */
/* Program the chip's RX filtering (promiscuous / all-multi / multicast
 * hash) from dev->flags and the multicast list.  Runs under phy_lock;
 * see the comment above: callers hold rtnl_lock or netif_tx_lock.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we may set cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hw unless a vlan group is registered or ASF
	 * management firmware needs the tags kept.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast frame: fill the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low byte of CRC selects one of 256
		 * filter bits spread over NUM_MC_HASH_REGISTERS registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX_MODE register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2165
Michael Chanfba9fe92006-06-12 22:21:25 -07002166#define FW_BUF_SIZE 0x8000
2167
2168static int
2169bnx2_gunzip_init(struct bnx2 *bp)
2170{
2171 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2172 goto gunzip_nomem1;
2173
2174 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2175 goto gunzip_nomem2;
2176
2177 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2178 if (bp->strm->workspace == NULL)
2179 goto gunzip_nomem3;
2180
2181 return 0;
2182
2183gunzip_nomem3:
2184 kfree(bp->strm);
2185 bp->strm = NULL;
2186
2187gunzip_nomem2:
2188 vfree(bp->gunzip_buf);
2189 bp->gunzip_buf = NULL;
2190
2191gunzip_nomem1:
2192 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2193 "uncompression.\n", bp->dev->name);
2194 return -ENOMEM;
2195}
2196
2197static void
2198bnx2_gunzip_end(struct bnx2 *bp)
2199{
2200 kfree(bp->strm->workspace);
2201
2202 kfree(bp->strm);
2203 bp->strm = NULL;
2204
2205 if (bp->gunzip_buf) {
2206 vfree(bp->gunzip_buf);
2207 bp->gunzip_buf = NULL;
2208 }
2209}
2210
/* Decompress a gzip-wrapped firmware image @zbuf of @len bytes into the
 * preallocated bp->gunzip_buf.  On return *outbuf/*outlen describe the
 * decompressed data.  Returns 0 on success (Z_STREAM_END), -EINVAL for a
 * bad gzip header, or the zlib error code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* If the FNAME flag is set, skip the NUL-terminated filename
	 * that follows the fixed header (bounded by len).
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* Raw deflate data starts at offset n. */
	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits => raw inflate (we skipped the wrapper). */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2251
/* Download an RV2P (receive path) microcode image.  Instructions are
 * written as high/low 32-bit word pairs, committed one instruction at a
 * time via the per-processor ADDR_CMD register, then the processor is
 * held in reset (un-stalled later by the caller's init sequence).
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* Each instruction is 8 bytes: high word then low word. */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at index i/8 to PROC1 or PROC2. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2284
Michael Chanaf3ee512006-11-19 14:09:25 -08002285static int
Michael Chanb6016b72005-05-26 13:03:09 -07002286load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2287{
2288 u32 offset;
2289 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002290 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002291
2292 /* Halt the CPU. */
2293 val = REG_RD_IND(bp, cpu_reg->mode);
2294 val |= cpu_reg->mode_value_halt;
2295 REG_WR_IND(bp, cpu_reg->mode, val);
2296 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2297
2298 /* Load the Text area. */
2299 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002300 if (fw->gz_text) {
2301 u32 text_len;
2302 void *text;
2303
2304 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2305 &text_len);
2306 if (rc)
2307 return rc;
2308
2309 fw->text = text;
2310 }
2311 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002312 int j;
2313
2314 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002315 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002316 }
2317 }
2318
2319 /* Load the Data area. */
2320 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2321 if (fw->data) {
2322 int j;
2323
2324 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2325 REG_WR_IND(bp, offset, fw->data[j]);
2326 }
2327 }
2328
2329 /* Load the SBSS area. */
2330 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2331 if (fw->sbss) {
2332 int j;
2333
2334 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2335 REG_WR_IND(bp, offset, fw->sbss[j]);
2336 }
2337 }
2338
2339 /* Load the BSS area. */
2340 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2341 if (fw->bss) {
2342 int j;
2343
2344 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2345 REG_WR_IND(bp, offset, fw->bss[j]);
2346 }
2347 }
2348
2349 /* Load the Read-Only area. */
2350 offset = cpu_reg->spad_base +
2351 (fw->rodata_addr - cpu_reg->mips_view_base);
2352 if (fw->rodata) {
2353 int j;
2354
2355 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2356 REG_WR_IND(bp, offset, fw->rodata[j]);
2357 }
2358 }
2359
2360 /* Clear the pre-fetch instruction. */
2361 REG_WR_IND(bp, cpu_reg->inst, 0);
2362 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2363
2364 /* Start the CPU. */
2365 val = REG_RD_IND(bp, cpu_reg->mode);
2366 val &= ~cpu_reg->mode_value_halt;
2367 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2368 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002369
2370 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002371}
2372
Michael Chanfba9fe92006-06-12 22:21:25 -07002373static int
Michael Chanb6016b72005-05-26 13:03:09 -07002374bnx2_init_cpus(struct bnx2 *bp)
2375{
2376 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002377 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002378 int rc = 0;
2379 void *text;
2380 u32 text_len;
2381
2382 if ((rc = bnx2_gunzip_init(bp)) != 0)
2383 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002384
2385 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002386 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2387 &text_len);
2388 if (rc)
2389 goto init_cpu_err;
2390
2391 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2392
2393 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2394 &text_len);
2395 if (rc)
2396 goto init_cpu_err;
2397
2398 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002399
2400 /* Initialize the RX Processor. */
2401 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2402 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2403 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2404 cpu_reg.state = BNX2_RXP_CPU_STATE;
2405 cpu_reg.state_value_clear = 0xffffff;
2406 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2407 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2408 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2409 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2410 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2411 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2412 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002413
Michael Chand43584c2006-11-19 14:14:35 -08002414 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2415 fw = &bnx2_rxp_fw_09;
2416 else
2417 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002418
Michael Chanaf3ee512006-11-19 14:09:25 -08002419 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002420 if (rc)
2421 goto init_cpu_err;
2422
Michael Chanb6016b72005-05-26 13:03:09 -07002423 /* Initialize the TX Processor. */
2424 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2425 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2426 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2427 cpu_reg.state = BNX2_TXP_CPU_STATE;
2428 cpu_reg.state_value_clear = 0xffffff;
2429 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2430 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2431 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2432 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2433 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2434 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2435 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002436
Michael Chand43584c2006-11-19 14:14:35 -08002437 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2438 fw = &bnx2_txp_fw_09;
2439 else
2440 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002441
Michael Chanaf3ee512006-11-19 14:09:25 -08002442 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002443 if (rc)
2444 goto init_cpu_err;
2445
Michael Chanb6016b72005-05-26 13:03:09 -07002446 /* Initialize the TX Patch-up Processor. */
2447 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2448 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2449 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2450 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2451 cpu_reg.state_value_clear = 0xffffff;
2452 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2453 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2454 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2455 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2456 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2457 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2458 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002459
Michael Chand43584c2006-11-19 14:14:35 -08002460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2461 fw = &bnx2_tpat_fw_09;
2462 else
2463 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002464
Michael Chanaf3ee512006-11-19 14:09:25 -08002465 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002466 if (rc)
2467 goto init_cpu_err;
2468
Michael Chanb6016b72005-05-26 13:03:09 -07002469 /* Initialize the Completion Processor. */
2470 cpu_reg.mode = BNX2_COM_CPU_MODE;
2471 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2472 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2473 cpu_reg.state = BNX2_COM_CPU_STATE;
2474 cpu_reg.state_value_clear = 0xffffff;
2475 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2476 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2477 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2478 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2479 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2480 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2481 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002482
Michael Chand43584c2006-11-19 14:14:35 -08002483 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2484 fw = &bnx2_com_fw_09;
2485 else
2486 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002487
Michael Chanaf3ee512006-11-19 14:09:25 -08002488 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002489 if (rc)
2490 goto init_cpu_err;
2491
Michael Chand43584c2006-11-19 14:14:35 -08002492 /* Initialize the Command Processor. */
2493 cpu_reg.mode = BNX2_CP_CPU_MODE;
2494 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2495 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2496 cpu_reg.state = BNX2_CP_CPU_STATE;
2497 cpu_reg.state_value_clear = 0xffffff;
2498 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2499 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2500 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2501 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2502 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2503 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2504 cpu_reg.mips_view_base = 0x8000000;
2505
2506 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2507 fw = &bnx2_cp_fw_09;
2508
2509 load_cpu_fw(bp, &cpu_reg, fw);
2510 if (rc)
2511 goto init_cpu_err;
2512 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002513init_cpu_err:
2514 bnx2_gunzip_end(bp);
2515 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002516}
2517
/* Move the device between PCI power states.  Only D0 and D3hot are
 * supported; D3hot optionally arms Wake-on-LAN (magic/ACPI packet
 * matching at 10/100 copper speeds).  Returns 0 or -EINVAL for an
 * unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Leave WOL packet-match modes. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link can
			 * be maintained at low power, then restore the
			 * user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1 only enter D3hot (state value 3) when WOL
		 * is armed; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2644
2645static int
2646bnx2_acquire_nvram_lock(struct bnx2 *bp)
2647{
2648 u32 val;
2649 int j;
2650
2651 /* Request access to the flash interface. */
2652 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2653 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2654 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2655 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2656 break;
2657
2658 udelay(5);
2659 }
2660
2661 if (j >= NVRAM_TIMEOUT_COUNT)
2662 return -EBUSY;
2663
2664 return 0;
2665}
2666
2667static int
2668bnx2_release_nvram_lock(struct bnx2 *bp)
2669{
2670 int j;
2671 u32 val;
2672
2673 /* Relinquish nvram interface. */
2674 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2675
2676 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2677 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2678 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2679 break;
2680
2681 udelay(5);
2682 }
2683
2684 if (j >= NVRAM_TIMEOUT_COUNT)
2685 return -EBUSY;
2686
2687 return 0;
2688}
2689
2690
/* Enable write access to the flash part.  For non-buffered flash an
 * explicit WREN command must also be issued and polled for completion.
 * Returns 0 on success or -EBUSY on WREN timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2719
2720static void
2721bnx2_disable_nvram_write(struct bnx2 *bp)
2722{
2723 u32 val;
2724
2725 val = REG_RD(bp, BNX2_MISC_CFG);
2726 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2727}
2728
2729
2730static void
2731bnx2_enable_nvram_access(struct bnx2 *bp)
2732{
2733 u32 val;
2734
2735 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2736 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002737 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002738 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2739}
2740
2741static void
2742bnx2_disable_nvram_access(struct bnx2 *bp)
2743{
2744 u32 val;
2745
2746 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2747 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002748 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002749 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2750 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2751}
2752
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts, which need no explicit erase.  Returns 0 on success or -EBUSY
 * when the controller does not signal DONE within the poll budget.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2792
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored as
 * 4 bytes, byte-swapped from the big-endian flash layout).  @cmd_flags
 * carries FIRST/LAST framing bits.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as page-number:page-offset rather than linearly.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2838
2839
/* Write one 32-bit word (4 bytes at @val) to NVRAM at @offset, converted
 * to the flash's big-endian layout.  @cmd_flags carries FIRST/LAST
 * framing bits.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as page-number:page-offset rather than linearly.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2883
/* Identify the attached flash/EEPROM device against flash_table and
 * record it in bp->flash_info, reconfiguring the NVM interface when the
 * strapping has not been overridden yet.  Also determines the usable
 * flash size.  Returns 0, -ENODEV for an unknown part, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size from shared hw config; fall back to the
	 * table's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2961
/* Read buf_size bytes of NVRAM starting at byte address `offset` into
 * ret_buf.  The NVRAM is accessed one dword at a time, so an unaligned
 * start or length is handled by reading whole dwords into a scratch
 * buffer and copying out only the requested bytes.  Returns 0 on
 * success or a negative error from the lock/read helpers.
 *
 * NOTE(review): the early `return rc` paths below exit without calling
 * bnx2_disable_nvram_access()/bnx2_release_nvram_lock() — verify this
 * is intended on hardware-error paths.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
	int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy out only
	 * the trailing pre_len bytes. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		/* If the whole request fits in this one dword, it is
		 * both the FIRST and the LAST access. */
		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Unaligned tail: round len32 up to a dword multiple and record
	 * how many `extra` pad bytes must be dropped at the end. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords go straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* The last dword goes through a scratch buffer so the
		 * `extra` pad bytes are not written past ret_buf. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3071
3072static int
3073bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3074 int buf_size)
3075{
3076 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07003077 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003078 int rc = 0;
3079 int align_start, align_end;
3080
3081 buf = data_buf;
3082 offset32 = offset;
3083 len32 = buf_size;
3084 align_start = align_end = 0;
3085
3086 if ((align_start = (offset32 & 3))) {
3087 offset32 &= ~3;
3088 len32 += align_start;
3089 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3090 return rc;
3091 }
3092
3093 if (len32 & 3) {
3094 if ((len32 > 4) || !align_start) {
3095 align_end = 4 - (len32 & 3);
3096 len32 += align_end;
3097 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3098 end, 4))) {
3099 return rc;
3100 }
3101 }
3102 }
3103
3104 if (align_start || align_end) {
3105 buf = kmalloc(len32, GFP_KERNEL);
3106 if (buf == 0)
3107 return -ENOMEM;
3108 if (align_start) {
3109 memcpy(buf, start, 4);
3110 }
3111 if (align_end) {
3112 memcpy(buf + len32 - 4, end, 4);
3113 }
3114 memcpy(buf + align_start, data_buf, buf_size);
3115 }
3116
Michael Chanae181bc2006-05-22 16:39:20 -07003117 if (bp->flash_info->buffered == 0) {
3118 flash_buffer = kmalloc(264, GFP_KERNEL);
3119 if (flash_buffer == NULL) {
3120 rc = -ENOMEM;
3121 goto nvram_write_end;
3122 }
3123 }
3124
Michael Chanb6016b72005-05-26 13:03:09 -07003125 written = 0;
3126 while ((written < len32) && (rc == 0)) {
3127 u32 page_start, page_end, data_start, data_end;
3128 u32 addr, cmd_flags;
3129 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003130
3131 /* Find the page_start addr */
3132 page_start = offset32 + written;
3133 page_start -= (page_start % bp->flash_info->page_size);
3134 /* Find the page_end addr */
3135 page_end = page_start + bp->flash_info->page_size;
3136 /* Find the data_start addr */
3137 data_start = (written == 0) ? offset32 : page_start;
3138 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003139 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003140 (offset32 + len32) : page_end;
3141
3142 /* Request access to the flash interface. */
3143 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3144 goto nvram_write_end;
3145
3146 /* Enable access to flash interface */
3147 bnx2_enable_nvram_access(bp);
3148
3149 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3150 if (bp->flash_info->buffered == 0) {
3151 int j;
3152
3153 /* Read the whole page into the buffer
3154 * (non-buffer flash only) */
3155 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3156 if (j == (bp->flash_info->page_size - 4)) {
3157 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3158 }
3159 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003160 page_start + j,
3161 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003162 cmd_flags);
3163
3164 if (rc)
3165 goto nvram_write_end;
3166
3167 cmd_flags = 0;
3168 }
3169 }
3170
3171 /* Enable writes to flash interface (unlock write-protect) */
3172 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3173 goto nvram_write_end;
3174
3175 /* Erase the page */
3176 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3177 goto nvram_write_end;
3178
3179 /* Re-enable the write again for the actual write */
3180 bnx2_enable_nvram_write(bp);
3181
3182 /* Loop to write back the buffer data from page_start to
3183 * data_start */
3184 i = 0;
3185 if (bp->flash_info->buffered == 0) {
3186 for (addr = page_start; addr < data_start;
3187 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003188
Michael Chanb6016b72005-05-26 13:03:09 -07003189 rc = bnx2_nvram_write_dword(bp, addr,
3190 &flash_buffer[i], cmd_flags);
3191
3192 if (rc != 0)
3193 goto nvram_write_end;
3194
3195 cmd_flags = 0;
3196 }
3197 }
3198
3199 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003200 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003201 if ((addr == page_end - 4) ||
3202 ((bp->flash_info->buffered) &&
3203 (addr == data_end - 4))) {
3204
3205 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3206 }
3207 rc = bnx2_nvram_write_dword(bp, addr, buf,
3208 cmd_flags);
3209
3210 if (rc != 0)
3211 goto nvram_write_end;
3212
3213 cmd_flags = 0;
3214 buf += 4;
3215 }
3216
3217 /* Loop to write back the buffer data from data_end
3218 * to page_end */
3219 if (bp->flash_info->buffered == 0) {
3220 for (addr = data_end; addr < page_end;
3221 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003222
Michael Chanb6016b72005-05-26 13:03:09 -07003223 if (addr == page_end-4) {
3224 cmd_flags = BNX2_NVM_COMMAND_LAST;
3225 }
3226 rc = bnx2_nvram_write_dword(bp, addr,
3227 &flash_buffer[i], cmd_flags);
3228
3229 if (rc != 0)
3230 goto nvram_write_end;
3231
3232 cmd_flags = 0;
3233 }
3234 }
3235
3236 /* Disable writes to flash interface (lock write-protect) */
3237 bnx2_disable_nvram_write(bp);
3238
3239 /* Disable access to flash interface */
3240 bnx2_disable_nvram_access(bp);
3241 bnx2_release_nvram_lock(bp);
3242
3243 /* Increment written */
3244 written += data_end - data_start;
3245 }
3246
3247nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003248 if (bp->flash_info->buffered == 0)
3249 kfree(flash_buffer);
3250
Michael Chanb6016b72005-05-26 13:03:09 -07003251 if (align_start || align_end)
3252 kfree(buf);
3253 return rc;
3254}
3255
3256static int
3257bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3258{
3259 u32 val;
3260 int i, rc = 0;
3261
3262 /* Wait for the current PCI transaction to complete before
3263 * issuing a reset. */
3264 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3265 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3266 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3267 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3268 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3269 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3270 udelay(5);
3271
Michael Chanb090ae22006-01-23 16:07:10 -08003272 /* Wait for the firmware to tell us it is ok to issue a reset. */
3273 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3274
Michael Chanb6016b72005-05-26 13:03:09 -07003275 /* Deposit a driver reset signature so the firmware knows that
3276 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003277 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003278 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3279
Michael Chanb6016b72005-05-26 13:03:09 -07003280 /* Do a dummy read to force the chip to complete all current transaction
3281 * before we issue a reset. */
3282 val = REG_RD(bp, BNX2_MISC_ID);
3283
Michael Chan234754d2006-11-19 14:11:41 -08003284 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3285 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3286 REG_RD(bp, BNX2_MISC_COMMAND);
3287 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003288
Michael Chan234754d2006-11-19 14:11:41 -08003289 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3290 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003291
Michael Chan234754d2006-11-19 14:11:41 -08003292 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003293
Michael Chan234754d2006-11-19 14:11:41 -08003294 } else {
3295 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3296 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3297 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3298
3299 /* Chip reset. */
3300 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3301
3302 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3303 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3304 current->state = TASK_UNINTERRUPTIBLE;
3305 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003306 }
Michael Chanb6016b72005-05-26 13:03:09 -07003307
Michael Chan234754d2006-11-19 14:11:41 -08003308 /* Reset takes approximate 30 usec */
3309 for (i = 0; i < 10; i++) {
3310 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3311 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3312 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3313 break;
3314 udelay(10);
3315 }
3316
3317 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3318 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3319 printk(KERN_ERR PFX "Chip reset did not complete\n");
3320 return -EBUSY;
3321 }
Michael Chanb6016b72005-05-26 13:03:09 -07003322 }
3323
3324 /* Make sure byte swapping is properly configured. */
3325 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3326 if (val != 0x01020304) {
3327 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3328 return -ENODEV;
3329 }
3330
Michael Chanb6016b72005-05-26 13:03:09 -07003331 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003332 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3333 if (rc)
3334 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003335
3336 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3337 /* Adjust the voltage regular to two steps lower. The default
3338 * of this register is 0x0000000e. */
3339 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3340
3341 /* Remove bad rbuf memory from the free pool. */
3342 rc = bnx2_alloc_bad_rbuf(bp);
3343 }
3344
3345 return rc;
3346}
3347
/* One-time hardware initialization performed after a chip reset:
 * configures DMA byte/word swapping, enables the context block, loads
 * the on-chip CPU firmware, programs the MAC address, MTU and host
 * coalescing parameters, points the chip at the status/statistics
 * blocks, and finally signals the firmware that initialization is
 * complete.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented magic tuning bits — confirm against
	 * the chip programmer's reference. */
	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit only for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, turn off relaxed ordering in the PCI-X command word. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Download firmware to the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and
	 * statistics blocks live (64-bit DMA addresses). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds (int value in the high half,
	 * non-int value in the low half of each register). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final hand-shake: tell the firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3512
Michael Chan59b47d82006-11-19 14:10:45 -08003513static void
3514bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3515{
3516 u32 val, offset0, offset1, offset2, offset3;
3517
3518 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3519 offset0 = BNX2_L2CTX_TYPE_XI;
3520 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3521 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3522 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3523 } else {
3524 offset0 = BNX2_L2CTX_TYPE;
3525 offset1 = BNX2_L2CTX_CMD_TYPE;
3526 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3527 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3528 }
3529 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3530 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3531
3532 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3533 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3534
3535 val = (u64) bp->tx_desc_mapping >> 32;
3536 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3537
3538 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3539 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3540}
Michael Chanb6016b72005-05-26 13:03:09 -07003541
3542static void
3543bnx2_init_tx_ring(struct bnx2 *bp)
3544{
3545 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003546 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003547
Michael Chan2f8af122006-08-15 01:39:10 -07003548 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3549
Michael Chanb6016b72005-05-26 13:03:09 -07003550 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003551
Michael Chanb6016b72005-05-26 13:03:09 -07003552 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3553 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3554
3555 bp->tx_prod = 0;
3556 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003557 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003558 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003559
Michael Chan59b47d82006-11-19 14:10:45 -08003560 cid = TX_CID;
3561 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3562 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003563
Michael Chan59b47d82006-11-19 14:10:45 -08003564 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003565}
3566
/* Initialize the RX BD ring pages: size every BD, chain the pages into
 * a circular list, program the RX context with the first page's DMA
 * address, pre-fill the ring with receive SKBs, and publish the initial
 * producer index/byte sequence to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The BD after the last data BD of each page holds the
		 * DMA address of the next page; the final page chains
		 * back to page 0, closing the ring. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Point the RX context at the first ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with SKBs; stop early if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3626
3627static void
Michael Chan13daffa2006-03-20 17:49:20 -08003628bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3629{
3630 u32 num_rings, max;
3631
3632 bp->rx_ring_size = size;
3633 num_rings = 1;
3634 while (size > MAX_RX_DESC_CNT) {
3635 size -= MAX_RX_DESC_CNT;
3636 num_rings++;
3637 }
3638 /* round to next power of 2 */
3639 max = MAX_RX_RINGS;
3640 while ((max & num_rings) == 0)
3641 max >>= 1;
3642
3643 if (num_rings != max)
3644 max <<= 1;
3645
3646 bp->rx_max_ring = max;
3647 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3648}
3649
3650static void
Michael Chanb6016b72005-05-26 13:03:09 -07003651bnx2_free_tx_skbs(struct bnx2 *bp)
3652{
3653 int i;
3654
3655 if (bp->tx_buf_ring == NULL)
3656 return;
3657
3658 for (i = 0; i < TX_DESC_CNT; ) {
3659 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3660 struct sk_buff *skb = tx_buf->skb;
3661 int j, last;
3662
3663 if (skb == NULL) {
3664 i++;
3665 continue;
3666 }
3667
3668 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3669 skb_headlen(skb), PCI_DMA_TODEVICE);
3670
3671 tx_buf->skb = NULL;
3672
3673 last = skb_shinfo(skb)->nr_frags;
3674 for (j = 0; j < last; j++) {
3675 tx_buf = &bp->tx_buf_ring[i + j + 1];
3676 pci_unmap_page(bp->pdev,
3677 pci_unmap_addr(tx_buf, mapping),
3678 skb_shinfo(skb)->frags[j].size,
3679 PCI_DMA_TODEVICE);
3680 }
Michael Chan745720e2006-06-29 12:37:41 -07003681 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003682 i += j + 1;
3683 }
3684
3685}
3686
3687static void
3688bnx2_free_rx_skbs(struct bnx2 *bp)
3689{
3690 int i;
3691
3692 if (bp->rx_buf_ring == NULL)
3693 return;
3694
Michael Chan13daffa2006-03-20 17:49:20 -08003695 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003696 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3697 struct sk_buff *skb = rx_buf->skb;
3698
Michael Chan05d0f1c2005-11-04 08:53:48 -08003699 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003700 continue;
3701
3702 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3703 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3704
3705 rx_buf->skb = NULL;
3706
Michael Chan745720e2006-06-29 12:37:41 -07003707 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003708 }
3709}
3710
/* Free all driver-owned TX and RX SKBs (used by bnx2_reset_nic()). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3717
3718static int
3719bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3720{
3721 int rc;
3722
3723 rc = bnx2_reset_chip(bp, reset_code);
3724 bnx2_free_skbs(bp);
3725 if (rc)
3726 return rc;
3727
Michael Chanfba9fe92006-06-12 22:21:25 -07003728 if ((rc = bnx2_init_chip(bp)) != 0)
3729 return rc;
3730
Michael Chanb6016b72005-05-26 13:03:09 -07003731 bnx2_init_tx_ring(bp);
3732 bnx2_init_rx_ring(bp);
3733 return 0;
3734}
3735
3736static int
3737bnx2_init_nic(struct bnx2 *bp)
3738{
3739 int rc;
3740
3741 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3742 return rc;
3743
Michael Chan80be4432006-11-19 14:07:28 -08003744 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003745 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003746 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003747 bnx2_set_link(bp);
3748 return 0;
3749}
3750
/* Offline register self-test.  For each entry in reg_tbl the register
 * is written with 0 and then 0xffffffff; the read/write bits (rw_mask)
 * must take the written value while the read-only bits (ro_mask) must
 * keep their original value.  The saved register value is restored in
 * all cases, including failure.  Returns 0 if every register passes,
 * -ENODEV on the first mismatch. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;	/* register offset within BAR0 */
		u16 flags;	/* currently unused by the test loop */
		u32 rw_mask;	/* bits expected to be read/write */
		u32 ro_mask;	/* bits expected to be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* offset 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must now read back as 0 ... */
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ... while read-only bits keep their saved value. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must now read back as all 1s. */
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value before moving on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3913
3914static int
3915bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3916{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003917 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003918 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3919 int i;
3920
3921 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3922 u32 offset;
3923
3924 for (offset = 0; offset < size; offset += 4) {
3925
3926 REG_WR_IND(bp, start + offset, test_pattern[i]);
3927
3928 if (REG_RD_IND(bp, start + offset) !=
3929 test_pattern[i]) {
3930 return -ENODEV;
3931 }
3932 }
3933 }
3934 return 0;
3935}
3936
3937static int
3938bnx2_test_memory(struct bnx2 *bp)
3939{
3940 int ret = 0;
3941 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003942 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003943 u32 offset;
3944 u32 len;
3945 } mem_tbl[] = {
3946 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003947 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003948 { 0xe0000, 0x4000 },
3949 { 0x120000, 0x4000 },
3950 { 0x1a0000, 0x4000 },
3951 { 0x160000, 0x4000 },
3952 { 0xffffffff, 0 },
3953 };
3954
3955 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3956 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3957 mem_tbl[i].len)) != 0) {
3958 return ret;
3959 }
3960 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003961
Michael Chanb6016b72005-05-26 13:03:09 -07003962 return ret;
3963}
3964
Michael Chanbc5a0692006-01-23 16:13:22 -08003965#define BNX2_MAC_LOOPBACK 0
3966#define BNX2_PHY_LOOPBACK 1
3967
/* Run one loopback self-test pass in MAC or PHY loopback mode.
 *
 * Builds a single 1514-byte test frame (destination = our MAC, payload
 * is a counting byte pattern), transmits it through the real TX ring,
 * and verifies that it comes back on the RX ring intact.
 *
 * Must be called with the device quiesced (used by the ethtool
 * self-test path).  Returns 0 on success, -EINVAL for an unknown
 * mode, -ENOMEM if the skb cannot be allocated, -ENODEV on any
 * mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Test frame: 6-byte dest MAC, 8 zero bytes, then i & 0xff. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so we can snapshot the current
	 * RX consumer index before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Fill one TX buffer descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced status update to pick up TX/RX completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully consumed by the TX engine. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware places an l2_fhdr ahead of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: hardware length includes the 4-byte FCS. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Byte-for-byte payload comparison against the pattern sent. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4086
Michael Chanbc5a0692006-01-23 16:13:22 -08004087#define BNX2_MAC_LOOPBACK_FAILED 1
4088#define BNX2_PHY_LOOPBACK_FAILED 2
4089#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4090 BNX2_PHY_LOOPBACK_FAILED)
4091
4092static int
4093bnx2_test_loopback(struct bnx2 *bp)
4094{
4095 int rc = 0;
4096
4097 if (!netif_running(bp->dev))
4098 return BNX2_LOOPBACK_FAILED;
4099
4100 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4101 spin_lock_bh(&bp->phy_lock);
4102 bnx2_init_phy(bp);
4103 spin_unlock_bh(&bp->phy_lock);
4104 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4105 rc |= BNX2_MAC_LOOPBACK_FAILED;
4106 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4107 rc |= BNX2_PHY_LOOPBACK_FAILED;
4108 return rc;
4109}
4110
Michael Chanb6016b72005-05-26 13:03:09 -07004111#define NVRAM_SIZE 0x200
4112#define CRC32_RESIDUAL 0xdebb20e3
4113
4114static int
4115bnx2_test_nvram(struct bnx2 *bp)
4116{
4117 u32 buf[NVRAM_SIZE / 4];
4118 u8 *data = (u8 *) buf;
4119 int rc = 0;
4120 u32 magic, csum;
4121
4122 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4123 goto test_nvram_done;
4124
4125 magic = be32_to_cpu(buf[0]);
4126 if (magic != 0x669955aa) {
4127 rc = -ENODEV;
4128 goto test_nvram_done;
4129 }
4130
4131 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4132 goto test_nvram_done;
4133
4134 csum = ether_crc_le(0x100, data);
4135 if (csum != CRC32_RESIDUAL) {
4136 rc = -ENODEV;
4137 goto test_nvram_done;
4138 }
4139
4140 csum = ether_crc_le(0x100, data + 0x100);
4141 if (csum != CRC32_RESIDUAL) {
4142 rc = -ENODEV;
4143 }
4144
4145test_nvram_done:
4146 return rc;
4147}
4148
4149static int
4150bnx2_test_link(struct bnx2 *bp)
4151{
4152 u32 bmsr;
4153
Michael Chanc770a652005-08-25 15:38:39 -07004154 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004155 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4156 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004157 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004158
Michael Chanb6016b72005-05-26 13:03:09 -07004159 if (bmsr & BMSR_LSTATUS) {
4160 return 0;
4161 }
4162 return -ENODEV;
4163}
4164
4165static int
4166bnx2_test_intr(struct bnx2 *bp)
4167{
4168 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004169 u16 status_idx;
4170
4171 if (!netif_running(bp->dev))
4172 return -ENODEV;
4173
4174 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4175
4176 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004177 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004178 REG_RD(bp, BNX2_HC_COMMAND);
4179
4180 for (i = 0; i < 10; i++) {
4181 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4182 status_idx) {
4183
4184 break;
4185 }
4186
4187 msleep_interruptible(10);
4188 }
4189 if (i < 10)
4190 return 0;
4191
4192 return -ENODEV;
4193}
4194
/* Periodic SerDes link maintenance for the 5706, called from
 * bnx2_timer().  Implements a parallel-detect fallback: when autoneg
 * is enabled but the link stays down and a signal is detected from a
 * partner that is not sending config, force 1000 Mbps full duplex;
 * once the partner starts sending config again, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A previously started autoneg cycle is still pending;
		 * count down one tick.
		 */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific shadow registers; 0x15 is read
			 * twice via the 0x17 select register.  Exact
			 * semantics are per the Broadcom datasheet —
			 * only the two bits tested below are relied on.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but no autoneg config
				 * from the partner: force the link.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link was forced by parallel detect; if the partner now
		 * sends CONFIG, return to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4249
/* Periodic SerDes link maintenance for the 5708, called from
 * bnx2_timer().  When autoneg fails to bring the link up, alternates
 * between forcing 2.5 Gbps full duplex and re-enabling autoneg until
 * one of them succeeds.  Only active on 2.5G-capable PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the current autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not link; try forced 2.5G for
			 * SERDES_FORCED_TIMEOUT ticks.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; go back to
			 * autoneg and allow it two timer ticks.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4284
/* Driver heartbeat timer.  Sends the periodic driver-alive pulse to
 * the bootcode, refreshes the firmware RX-drop counter, and runs the
 * chip-specific SerDes link maintenance.  Re-arms itself with
 * bp->current_interval (which the serdes helpers may adjust).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip this tick while interrupts are blocked (e.g. during a
	 * reset); just re-arm and try again next time.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Pulse the firmware so it knows the driver is still alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4312
4313/* Called with rtnl_lock */
/* net_device open (ifup) handler.  Called with rtnl_lock held.
 * Powers up the chip, allocates rings, requests the IRQ (MSI with an
 * INTx fallback), initializes the NIC, and validates MSI delivery
 * before enabling the queue.  Returns 0 or a negative errno; all
 * resources are released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Try MSI first, except on 5706 A0/A1 or when disabled via the
	 * disable_msi module parameter; otherwise use shared INTx.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Undo IRQ/MSI setup and ring allocation on init failure. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize and retry with shared INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
						IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4408
/* Workqueue handler that resets the NIC after a TX timeout (scheduled
 * by bnx2_tx_timeout()).  bp->in_reset_task is polled by bnx2_close()
 * so the device is not torn down while a reset is in flight.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Block the timer's register pulse until interrupts resume. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4426
/* net_device TX watchdog callback: defer the recovery reset to a
 * workqueue rather than resetting from watchdog context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4435
4436#ifdef BCM_VLAN
4437/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * RX mode with traffic stopped.  Called with rtnl_lock held.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4450
4451/* Called with rtnl_lock */
/* VLAN acceleration hook: drop the per-VID device pointer and
 * reprogram the RX mode with traffic stopped.  Called with rtnl_lock
 * held.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4465#endif
4466
Herbert Xu932ff272006-06-09 12:20:56 -07004467/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004468 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4469 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004470 */
4471static int
4472bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4473{
Michael Chan972ec0d2006-01-23 16:12:43 -08004474 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004475 dma_addr_t mapping;
4476 struct tx_bd *txbd;
4477 struct sw_bd *tx_buf;
4478 u32 len, vlan_tag_flags, last_frag, mss;
4479 u16 prod, ring_prod;
4480 int i;
4481
Michael Chane89bbf12005-08-25 15:36:58 -07004482 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004483 netif_stop_queue(dev);
4484 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4485 dev->name);
4486
4487 return NETDEV_TX_BUSY;
4488 }
4489 len = skb_headlen(skb);
4490 prod = bp->tx_prod;
4491 ring_prod = TX_RING_IDX(prod);
4492
4493 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004494 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004495 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4496 }
4497
4498 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4499 vlan_tag_flags |=
4500 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4501 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004502#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004503 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004504 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4505 u32 tcp_opt_len, ip_tcp_len;
4506
4507 if (skb_header_cloned(skb) &&
4508 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4509 dev_kfree_skb(skb);
4510 return NETDEV_TX_OK;
4511 }
4512
4513 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4514 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4515
4516 tcp_opt_len = 0;
4517 if (skb->h.th->doff > 5) {
4518 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4519 }
4520 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4521
4522 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004523 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004524 skb->h.th->check =
4525 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4526 skb->nh.iph->daddr,
4527 0, IPPROTO_TCP, 0);
4528
4529 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4530 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4531 (tcp_opt_len >> 2)) << 8;
4532 }
4533 }
4534 else
4535#endif
4536 {
4537 mss = 0;
4538 }
4539
4540 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004541
Michael Chanb6016b72005-05-26 13:03:09 -07004542 tx_buf = &bp->tx_buf_ring[ring_prod];
4543 tx_buf->skb = skb;
4544 pci_unmap_addr_set(tx_buf, mapping, mapping);
4545
4546 txbd = &bp->tx_desc_ring[ring_prod];
4547
4548 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4549 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4550 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4551 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4552
4553 last_frag = skb_shinfo(skb)->nr_frags;
4554
4555 for (i = 0; i < last_frag; i++) {
4556 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4557
4558 prod = NEXT_TX_BD(prod);
4559 ring_prod = TX_RING_IDX(prod);
4560 txbd = &bp->tx_desc_ring[ring_prod];
4561
4562 len = frag->size;
4563 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4564 len, PCI_DMA_TODEVICE);
4565 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4566 mapping, mapping);
4567
4568 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4569 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4570 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4571 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4572
4573 }
4574 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4575
4576 prod = NEXT_TX_BD(prod);
4577 bp->tx_prod_bseq += skb->len;
4578
Michael Chan234754d2006-11-19 14:11:41 -08004579 REG_WR16(bp, bp->tx_bidx_addr, prod);
4580 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004581
4582 mmiowb();
4583
4584 bp->tx_prod = prod;
4585 dev->trans_start = jiffies;
4586
Michael Chane89bbf12005-08-25 15:36:58 -07004587 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004588 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004589 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004590 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004591 }
4592
4593 return NETDEV_TX_OK;
4594}
4595
4596/* Called with rtnl_lock */
/* net_device close (ifdown) handler.  Called with rtnl_lock held.
 * Quiesces the device, resets the chip with a WOL-appropriate
 * firmware code, releases the IRQ and ring memory, and drops the
 * chip into D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code from the WOL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4631
/* Fold a 64-bit hardware counter (stored as a _hi/_lo word pair) into
 * an unsigned long.  On 64-bit builds both words are combined; on
 * 32-bit builds only the low word is used, since unsigned long cannot
 * hold the full 64-bit value (and shifting it by 32 would be
 * undefined there).
 */
#define GET_NET_STATS64(ctr) \
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr) \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4644
/* net_device get_stats handler: translate the chip's statistics block
 * into struct net_device_stats.  Returns the last values unchanged if
 * the statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the firmware dropped before the host saw them. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4720
4721/* All ethtool functions called with rtnl_lock */
4722
4723static int
4724bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4725{
Michael Chan972ec0d2006-01-23 16:12:43 -08004726 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004727
4728 cmd->supported = SUPPORTED_Autoneg;
4729 if (bp->phy_flags & PHY_SERDES_FLAG) {
4730 cmd->supported |= SUPPORTED_1000baseT_Full |
4731 SUPPORTED_FIBRE;
4732
4733 cmd->port = PORT_FIBRE;
4734 }
4735 else {
4736 cmd->supported |= SUPPORTED_10baseT_Half |
4737 SUPPORTED_10baseT_Full |
4738 SUPPORTED_100baseT_Half |
4739 SUPPORTED_100baseT_Full |
4740 SUPPORTED_1000baseT_Full |
4741 SUPPORTED_TP;
4742
4743 cmd->port = PORT_TP;
4744 }
4745
4746 cmd->advertising = bp->advertising;
4747
4748 if (bp->autoneg & AUTONEG_SPEED) {
4749 cmd->autoneg = AUTONEG_ENABLE;
4750 }
4751 else {
4752 cmd->autoneg = AUTONEG_DISABLE;
4753 }
4754
4755 if (netif_carrier_ok(dev)) {
4756 cmd->speed = bp->line_speed;
4757 cmd->duplex = bp->duplex;
4758 }
4759 else {
4760 cmd->speed = -1;
4761 cmd->duplex = -1;
4762 }
4763
4764 cmd->transceiver = XCVR_INTERNAL;
4765 cmd->phy_address = bp->phy_addr;
4766
4767 return 0;
4768}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004769
/* ethtool set_settings handler (called with rtnl_lock held).
 * Validates the requested autoneg/speed/duplex against the PHY type
 * (SerDes vs. copper, 2.5G capability), commits the new settings to
 * bp, and reprograms the PHY.  Returns 0 or -EINVAL.  All candidate
 * values are staged in locals so nothing is committed on a validation
 * failure.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes forced mode: only 1000/2500 full duplex,
			 * and 2500 only on 2.5G-capable PHYs.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 is not supported on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the staged values. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4845
4846static void
4847bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4848{
Michael Chan972ec0d2006-01-23 16:12:43 -08004849 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004850
4851 strcpy(info->driver, DRV_MODULE_NAME);
4852 strcpy(info->version, DRV_MODULE_VERSION);
4853 strcpy(info->bus_info, pci_name(bp->pdev));
4854 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4855 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4856 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004857 info->fw_version[1] = info->fw_version[3] = '.';
4858 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004859}
4860
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool_ops::get_regs_len - tell the ethtool core how large a buffer
 * to allocate before calling bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4868
4869static void
4870bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4871{
4872 u32 *p = _p, i, offset;
4873 u8 *orig_p = _p;
4874 struct bnx2 *bp = netdev_priv(dev);
4875 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4876 0x0800, 0x0880, 0x0c00, 0x0c10,
4877 0x0c30, 0x0d08, 0x1000, 0x101c,
4878 0x1040, 0x1048, 0x1080, 0x10a4,
4879 0x1400, 0x1490, 0x1498, 0x14f0,
4880 0x1500, 0x155c, 0x1580, 0x15dc,
4881 0x1600, 0x1658, 0x1680, 0x16d8,
4882 0x1800, 0x1820, 0x1840, 0x1854,
4883 0x1880, 0x1894, 0x1900, 0x1984,
4884 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4885 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4886 0x2000, 0x2030, 0x23c0, 0x2400,
4887 0x2800, 0x2820, 0x2830, 0x2850,
4888 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4889 0x3c00, 0x3c94, 0x4000, 0x4010,
4890 0x4080, 0x4090, 0x43c0, 0x4458,
4891 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4892 0x4fc0, 0x5010, 0x53c0, 0x5444,
4893 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4894 0x5fc0, 0x6000, 0x6400, 0x6428,
4895 0x6800, 0x6848, 0x684c, 0x6860,
4896 0x6888, 0x6910, 0x8000 };
4897
4898 regs->version = 0;
4899
4900 memset(p, 0, BNX2_REGDUMP_LEN);
4901
4902 if (!netif_running(bp->dev))
4903 return;
4904
4905 i = 0;
4906 offset = reg_boundaries[0];
4907 p += offset;
4908 while (offset < BNX2_REGDUMP_LEN) {
4909 *p++ = REG_RD(bp, offset);
4910 offset += 4;
4911 if (offset == reg_boundaries[i + 1]) {
4912 offset = reg_boundaries[i + 2];
4913 p = (u32 *) (orig_p + offset);
4914 i += 2;
4915 }
4916 }
4917}
4918
Michael Chanb6016b72005-05-26 13:03:09 -07004919static void
4920bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4921{
Michael Chan972ec0d2006-01-23 16:12:43 -08004922 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004923
4924 if (bp->flags & NO_WOL_FLAG) {
4925 wol->supported = 0;
4926 wol->wolopts = 0;
4927 }
4928 else {
4929 wol->supported = WAKE_MAGIC;
4930 if (bp->wol)
4931 wol->wolopts = WAKE_MAGIC;
4932 else
4933 wol->wolopts = 0;
4934 }
4935 memset(&wol->sopass, 0, sizeof(wol->sopass));
4936}
4937
4938static int
4939bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4940{
Michael Chan972ec0d2006-01-23 16:12:43 -08004941 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004942
4943 if (wol->wolopts & ~WAKE_MAGIC)
4944 return -EINVAL;
4945
4946 if (wol->wolopts & WAKE_MAGIC) {
4947 if (bp->flags & NO_WOL_FLAG)
4948 return -EINVAL;
4949
4950 bp->wol = 1;
4951 }
4952 else {
4953 bp->wol = 0;
4954 }
4955 return 0;
4956}
4957
/* ethtool_ops::nway_reset - restart link autonegotiation.
 *
 * Only valid when speed autonegotiation is enabled.  On SerDes ports the
 * link is first forced down (by putting the PHY in loopback) so the remote
 * end sees a transition, and the SerDes autoneg poll timer is re-armed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping so the link stays down long
		 * enough for the peer to notice.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the SerDes autonegotiation timeout handling. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4992
4993static int
4994bnx2_get_eeprom_len(struct net_device *dev)
4995{
Michael Chan972ec0d2006-01-23 16:12:43 -08004996 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004997
Michael Chan1122db72006-01-23 16:11:42 -08004998 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004999 return 0;
5000
Michael Chan1122db72006-01-23 16:11:42 -08005001 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005002}
5003
5004static int
5005bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5006 u8 *eebuf)
5007{
Michael Chan972ec0d2006-01-23 16:12:43 -08005008 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005009 int rc;
5010
John W. Linville1064e942005-11-10 12:58:24 -08005011 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005012
5013 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5014
5015 return rc;
5016}
5017
5018static int
5019bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5020 u8 *eebuf)
5021{
Michael Chan972ec0d2006-01-23 16:12:43 -08005022 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005023 int rc;
5024
John W. Linville1064e942005-11-10 12:58:24 -08005025 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005026
5027 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5028
5029 return rc;
5030}
5031
/* ethtool_ops::get_coalesce - report the current interrupt coalescing
 * settings.  Tick values are in microseconds, trip counts in frames.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero the struct so unsupported parameters read as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5053
/* ethtool_ops::set_coalesce - apply new interrupt coalescing parameters.
 *
 * Every value is silently clamped to the width of its hardware field:
 * 10 bits (0x3ff) for the usec tick timers, 8 bits (0xff) for the frame
 * trip counts, and 24 bits with the low byte forced to zero for the
 * statistics timer.  If the interface is up, the NIC is re-initialized so
 * the new values take effect immediately.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Statistics timer: 24-bit field, low byte must be zero. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Restart the NIC so the hardware picks up the new values. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5097
/* ethtool_ops::get_ringparam - report current and maximum rx/tx ring sizes.
 * Mini and jumbo rings are not implemented by this hardware.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
5114
/* ethtool_ops::set_ringparam - resize the rx and tx rings.
 *
 * The tx ring must hold more than MAX_SKB_FRAGS descriptors so a maximally
 * fragmented skb always fits.  Resizing requires tearing down and
 * re-allocating all descriptor memory, so the NIC is stopped and restarted
 * when it is running.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings allocated; only a later close/open
		 * cycle recovers it.  Confirm this is the intended policy.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5148
5149static void
5150bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5151{
Michael Chan972ec0d2006-01-23 16:12:43 -08005152 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005153
5154 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5155 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5156 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5157}
5158
5159static int
5160bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5161{
Michael Chan972ec0d2006-01-23 16:12:43 -08005162 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005163
5164 bp->req_flow_ctrl = 0;
5165 if (epause->rx_pause)
5166 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5167 if (epause->tx_pause)
5168 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5169
5170 if (epause->autoneg) {
5171 bp->autoneg |= AUTONEG_FLOW_CTRL;
5172 }
5173 else {
5174 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5175 }
5176
Michael Chanc770a652005-08-25 15:38:39 -07005177 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005178
5179 bnx2_setup_phy(bp);
5180
Michael Chanc770a652005-08-25 15:38:39 -07005181 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005182
5183 return 0;
5184}
5185
/* ethtool_ops::get_rx_csum - report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5193
/* ethtool_ops::set_rx_csum - enable/disable rx checksum offload.  Only the
 * software flag is updated here; the rx path consults bp->rx_csum.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5202
Michael Chanb11d6212006-06-29 12:31:21 -07005203static int
5204bnx2_set_tso(struct net_device *dev, u32 data)
5205{
5206 if (data)
5207 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5208 else
5209 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5210 return 0;
5211}
5212
/* Number of counters exported via "ethtool -S".  Must stay in sync with
 * the three parallel tables: bnx2_stats_str_arr (names),
 * bnx2_stats_offset_arr (statistics-block offsets) and the
 * bnx2_57xx_stats_len_arr tables (counter widths).
 */
#define BNX2_NUM_STATS 46

/* Counter names returned for ETH_SS_STATS, in table order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5265
/* 32-bit word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Location of each counter in the DMA statistics block, parallel to
 * bnx2_stats_str_arr[].  The _hi entries address the high 32 bits of
 * 64-bit counters; bnx2_get_ethtool_stats() reads the low word at the
 * following offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5316
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Byte width (8 or 4) of each counter on 5706 A0-A2 and 5708 A0 chips;
 * 0 marks a counter skipped (reported as 0) due to the errata above.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5327
/* Counter widths for later chip revisions; carrier-sense errors are valid
 * here, so only stat_IfHCInBadOctets (index 1) stays skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5335
/* Number of self tests; indices match both bnx2_tests_str_arr[] and the
 * buf[] result slots filled in by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Test names returned for ETH_SS_TEST.  "offline" tests require the
 * interface to be taken out of service; "online" tests do not.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5348
/* ethtool_ops::self_test_count - number of results bnx2_self_test() fills. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5354
/* ethtool_ops::self_test - run diagnostics ("ethtool -t").
 *
 * buf[i] is set non-zero when test i fails (indices match
 * bnx2_tests_str_arr).  Offline tests (register, memory, loopback) take
 * exclusive control of the hardware; the NIC is then re-initialized and
 * the online tests (nvram, interrupt, link) run afterwards.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback failure mask directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Bring the NIC back into service for the online
			 * tests below.
			 */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5410
5411static void
5412bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5413{
5414 switch (stringset) {
5415 case ETH_SS_STATS:
5416 memcpy(buf, bnx2_stats_str_arr,
5417 sizeof(bnx2_stats_str_arr));
5418 break;
5419 case ETH_SS_TEST:
5420 memcpy(buf, bnx2_tests_str_arr,
5421 sizeof(bnx2_tests_str_arr));
5422 break;
5423 }
5424}
5425
/* ethtool_ops::get_stats_count - number of u64 values returned by
 * bnx2_get_ethtool_stats().
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5431
/* ethtool_ops::get_ethtool_stats - export the hardware statistics block as
 * u64 values in bnx2_stats_str_arr[] order.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated yet (device never opened). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions have counter errata; select the width table that
	 * masks the affected counters.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: the table offset addresses the high word,
		 * the low word immediately follows it.
		 */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5472
/* ethtool_ops::phys_id - blink the port LEDs to physically identify the
 * adapter ("ethtool -p").  data is the duration in seconds (0 selects a
 * 2-second default); each second alternates between LEDs off and all LED
 * overrides on.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs, saving the current mode so it
	 * can be restored afterwards.
	 */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two half-second phases per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupted the blink. */
		if (signal_pending(current))
			break;
	}
	/* Return the LEDs to normal hardware control. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5506
/* ethtool entry points for the bnx2 driver; handlers are defined above. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5544
5545/* Called with rtnl_lock */
5546static int
5547bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5548{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005549 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005550 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005551 int err;
5552
5553 switch(cmd) {
5554 case SIOCGMIIPHY:
5555 data->phy_id = bp->phy_addr;
5556
5557 /* fallthru */
5558 case SIOCGMIIREG: {
5559 u32 mii_regval;
5560
Michael Chanc770a652005-08-25 15:38:39 -07005561 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005562 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005563 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005564
5565 data->val_out = mii_regval;
5566
5567 return err;
5568 }
5569
5570 case SIOCSMIIREG:
5571 if (!capable(CAP_NET_ADMIN))
5572 return -EPERM;
5573
Michael Chanc770a652005-08-25 15:38:39 -07005574 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005575 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005576 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005577
5578 return err;
5579
5580 default:
5581 /* do nothing */
5582 break;
5583 }
5584 return -EOPNOTSUPP;
5585}
5586
/* Called with rtnl_lock */
/* Set a new station MAC address.  The chip is reprogrammed immediately
 * when the interface is up; otherwise the next open picks up the new
 * address via bnx2_set_mac_addr().
 */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	/* Reject multicast and all-zero addresses. */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
5603
5604/* Called with rtnl_lock */
5605static int
5606bnx2_change_mtu(struct net_device *dev, int new_mtu)
5607{
Michael Chan972ec0d2006-01-23 16:12:43 -08005608 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005609
5610 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5611 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5612 return -EINVAL;
5613
5614 dev->mtu = new_mtu;
5615 if (netif_running(dev)) {
5616 bnx2_netif_stop(bp);
5617
5618 bnx2_init_nic(bp);
5619
5620 bnx2_netif_start(bp);
5621 }
5622 return 0;
5623}
5624
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the NIC when normal interrupt delivery is not
 * available (netconsole etc.) by invoking the interrupt handler directly
 * with the IRQ line masked.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5636
5637static int __devinit
5638bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5639{
5640 struct bnx2 *bp;
5641 unsigned long mem_len;
5642 int rc;
5643 u32 reg;
5644
5645 SET_MODULE_OWNER(dev);
5646 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005647 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005648
5649 bp->flags = 0;
5650 bp->phy_flags = 0;
5651
5652 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5653 rc = pci_enable_device(pdev);
5654 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005655 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005656 goto err_out;
5657 }
5658
5659 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005660 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005661 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005662 rc = -ENODEV;
5663 goto err_out_disable;
5664 }
5665
5666 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5667 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005668 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005669 goto err_out_disable;
5670 }
5671
5672 pci_set_master(pdev);
5673
5674 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5675 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005676 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005677 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005678 rc = -EIO;
5679 goto err_out_release;
5680 }
5681
Michael Chanb6016b72005-05-26 13:03:09 -07005682 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5683 bp->flags |= USING_DAC_FLAG;
5684 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005685 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005686 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005687 rc = -EIO;
5688 goto err_out_release;
5689 }
5690 }
5691 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005692 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005693 rc = -EIO;
5694 goto err_out_release;
5695 }
5696
5697 bp->dev = dev;
5698 bp->pdev = pdev;
5699
5700 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005701 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5702
5703 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005704 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005705 dev->mem_end = dev->mem_start + mem_len;
5706 dev->irq = pdev->irq;
5707
5708 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5709
5710 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005711 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005712 rc = -ENOMEM;
5713 goto err_out_release;
5714 }
5715
5716 /* Configure byte swap and enable write to the reg_window registers.
5717 * Rely on CPU to do target byte swapping on big endian systems
5718 * The chip's target access swapping will not swap all accesses
5719 */
5720 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5721 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5722 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5723
Pavel Machek829ca9a2005-09-03 15:56:56 -07005724 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005725
5726 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5727
Michael Chan59b47d82006-11-19 14:10:45 -08005728 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5729 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5730 if (bp->pcix_cap == 0) {
5731 dev_err(&pdev->dev,
5732 "Cannot find PCIX capability, aborting.\n");
5733 rc = -EIO;
5734 goto err_out_unmap;
5735 }
5736 }
5737
Michael Chanb6016b72005-05-26 13:03:09 -07005738 /* Get bus information. */
5739 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5740 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5741 u32 clkreg;
5742
5743 bp->flags |= PCIX_FLAG;
5744
5745 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005746
Michael Chanb6016b72005-05-26 13:03:09 -07005747 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5748 switch (clkreg) {
5749 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5750 bp->bus_speed_mhz = 133;
5751 break;
5752
5753 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5754 bp->bus_speed_mhz = 100;
5755 break;
5756
5757 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5758 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5759 bp->bus_speed_mhz = 66;
5760 break;
5761
5762 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5763 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5764 bp->bus_speed_mhz = 50;
5765 break;
5766
5767 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5768 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5769 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5770 bp->bus_speed_mhz = 33;
5771 break;
5772 }
5773 }
5774 else {
5775 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5776 bp->bus_speed_mhz = 66;
5777 else
5778 bp->bus_speed_mhz = 33;
5779 }
5780
5781 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5782 bp->flags |= PCI_32BIT_FLAG;
5783
5784 /* 5706A0 may falsely detect SERR and PERR. */
5785 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5786 reg = REG_RD(bp, PCI_COMMAND);
5787 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5788 REG_WR(bp, PCI_COMMAND, reg);
5789 }
5790 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5791 !(bp->flags & PCIX_FLAG)) {
5792
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005793 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005794 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005795 goto err_out_unmap;
5796 }
5797
5798 bnx2_init_nvram(bp);
5799
Michael Chane3648b32005-11-04 08:51:21 -08005800 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5801
5802 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5803 BNX2_SHM_HDR_SIGNATURE_SIG)
5804 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5805 else
5806 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5807
Michael Chanb6016b72005-05-26 13:03:09 -07005808 /* Get the permanent MAC address. First we need to make sure the
5809 * firmware is actually running.
5810 */
Michael Chane3648b32005-11-04 08:51:21 -08005811 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005812
5813 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5814 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005815 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005816 rc = -ENODEV;
5817 goto err_out_unmap;
5818 }
5819
Michael Chane3648b32005-11-04 08:51:21 -08005820 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005821
Michael Chane3648b32005-11-04 08:51:21 -08005822 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005823 bp->mac_addr[0] = (u8) (reg >> 8);
5824 bp->mac_addr[1] = (u8) reg;
5825
Michael Chane3648b32005-11-04 08:51:21 -08005826 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005827 bp->mac_addr[2] = (u8) (reg >> 24);
5828 bp->mac_addr[3] = (u8) (reg >> 16);
5829 bp->mac_addr[4] = (u8) (reg >> 8);
5830 bp->mac_addr[5] = (u8) reg;
5831
5832 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005833 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005834
5835 bp->rx_csum = 1;
5836
5837 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5838
5839 bp->tx_quick_cons_trip_int = 20;
5840 bp->tx_quick_cons_trip = 20;
5841 bp->tx_ticks_int = 80;
5842 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005843
Michael Chanb6016b72005-05-26 13:03:09 -07005844 bp->rx_quick_cons_trip_int = 6;
5845 bp->rx_quick_cons_trip = 6;
5846 bp->rx_ticks_int = 18;
5847 bp->rx_ticks = 18;
5848
5849 bp->stats_ticks = 1000000 & 0xffff00;
5850
5851 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005852 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005853
Michael Chan5b0c76a2005-11-04 08:45:49 -08005854 bp->phy_addr = 1;
5855
Michael Chanb6016b72005-05-26 13:03:09 -07005856 /* Disable WOL support if we are running on a SERDES chip. */
5857 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5858 bp->phy_flags |= PHY_SERDES_FLAG;
5859 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005860 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5861 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005862 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005863 BNX2_SHARED_HW_CFG_CONFIG);
5864 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5865 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5866 }
Michael Chanb6016b72005-05-26 13:03:09 -07005867 }
5868
Michael Chan16088272006-06-12 22:16:43 -07005869 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5870 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5871 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005872 bp->flags |= NO_WOL_FLAG;
5873
Michael Chanb6016b72005-05-26 13:03:09 -07005874 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5875 bp->tx_quick_cons_trip_int =
5876 bp->tx_quick_cons_trip;
5877 bp->tx_ticks_int = bp->tx_ticks;
5878 bp->rx_quick_cons_trip_int =
5879 bp->rx_quick_cons_trip;
5880 bp->rx_ticks_int = bp->rx_ticks;
5881 bp->comp_prod_trip_int = bp->comp_prod_trip;
5882 bp->com_ticks_int = bp->com_ticks;
5883 bp->cmd_ticks_int = bp->cmd_ticks;
5884 }
5885
Michael Chanf9317a42006-09-29 17:06:23 -07005886 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5887 *
5888 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5889 * with byte enables disabled on the unused 32-bit word. This is legal
5890 * but causes problems on the AMD 8132 which will eventually stop
5891 * responding after a while.
5892 *
5893 * AMD believes this incompatibility is unique to the 5706, and
5894 * prefers to locally disable MSI rather than globally disabling it
5895 * using pci_msi_quirk.
5896 */
5897 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5898 struct pci_dev *amd_8132 = NULL;
5899
5900 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5901 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5902 amd_8132))) {
5903 u8 rev;
5904
5905 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5906 if (rev >= 0x10 && rev <= 0x13) {
5907 disable_msi = 1;
5908 pci_dev_put(amd_8132);
5909 break;
5910 }
5911 }
5912 }
5913
Michael Chanb6016b72005-05-26 13:03:09 -07005914 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5915 bp->req_line_speed = 0;
5916 if (bp->phy_flags & PHY_SERDES_FLAG) {
5917 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005918
Michael Chane3648b32005-11-04 08:51:21 -08005919 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005920 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5921 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5922 bp->autoneg = 0;
5923 bp->req_line_speed = bp->line_speed = SPEED_1000;
5924 bp->req_duplex = DUPLEX_FULL;
5925 }
Michael Chanb6016b72005-05-26 13:03:09 -07005926 }
5927 else {
5928 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5929 }
5930
5931 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5932
Michael Chancd339a02005-08-25 15:35:24 -07005933 init_timer(&bp->timer);
5934 bp->timer.expires = RUN_AT(bp->timer_interval);
5935 bp->timer.data = (unsigned long) bp;
5936 bp->timer.function = bnx2_timer;
5937
Michael Chanb6016b72005-05-26 13:03:09 -07005938 return 0;
5939
5940err_out_unmap:
5941 if (bp->regview) {
5942 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005943 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005944 }
5945
5946err_out_release:
5947 pci_release_regions(pdev);
5948
5949err_out_disable:
5950 pci_disable_device(pdev);
5951 pci_set_drvdata(pdev, NULL);
5952
5953err_out:
5954 return rc;
5955}
5956
5957static int __devinit
5958bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5959{
5960 static int version_printed = 0;
5961 struct net_device *dev = NULL;
5962 struct bnx2 *bp;
5963 int rc, i;
5964
5965 if (version_printed++ == 0)
5966 printk(KERN_INFO "%s", version);
5967
5968 /* dev zeroed in init_etherdev */
5969 dev = alloc_etherdev(sizeof(*bp));
5970
5971 if (!dev)
5972 return -ENOMEM;
5973
5974 rc = bnx2_init_board(pdev, dev);
5975 if (rc < 0) {
5976 free_netdev(dev);
5977 return rc;
5978 }
5979
5980 dev->open = bnx2_open;
5981 dev->hard_start_xmit = bnx2_start_xmit;
5982 dev->stop = bnx2_close;
5983 dev->get_stats = bnx2_get_stats;
5984 dev->set_multicast_list = bnx2_set_rx_mode;
5985 dev->do_ioctl = bnx2_ioctl;
5986 dev->set_mac_address = bnx2_change_mac_addr;
5987 dev->change_mtu = bnx2_change_mtu;
5988 dev->tx_timeout = bnx2_tx_timeout;
5989 dev->watchdog_timeo = TX_TIMEOUT;
5990#ifdef BCM_VLAN
5991 dev->vlan_rx_register = bnx2_vlan_rx_register;
5992 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5993#endif
5994 dev->poll = bnx2_poll;
5995 dev->ethtool_ops = &bnx2_ethtool_ops;
5996 dev->weight = 64;
5997
Michael Chan972ec0d2006-01-23 16:12:43 -08005998 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005999
6000#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6001 dev->poll_controller = poll_bnx2;
6002#endif
6003
6004 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006005 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006006 if (bp->regview)
6007 iounmap(bp->regview);
6008 pci_release_regions(pdev);
6009 pci_disable_device(pdev);
6010 pci_set_drvdata(pdev, NULL);
6011 free_netdev(dev);
6012 return rc;
6013 }
6014
6015 pci_set_drvdata(pdev, dev);
6016
6017 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07006018 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07006019 bp->name = board_info[ent->driver_data].name,
6020 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6021 "IRQ %d, ",
6022 dev->name,
6023 bp->name,
6024 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6025 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6026 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6027 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6028 bp->bus_speed_mhz,
6029 dev->base_addr,
6030 bp->pdev->irq);
6031
6032 printk("node addr ");
6033 for (i = 0; i < 6; i++)
6034 printk("%2.2x", dev->dev_addr[i]);
6035 printk("\n");
6036
6037 dev->features |= NETIF_F_SG;
6038 if (bp->flags & USING_DAC_FLAG)
6039 dev->features |= NETIF_F_HIGHDMA;
6040 dev->features |= NETIF_F_IP_CSUM;
6041#ifdef BCM_VLAN
6042 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6043#endif
6044#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006045 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006046#endif
6047
6048 netif_carrier_off(bp->dev);
6049
6050 return 0;
6051}
6052
/* PCI remove callback: tear down one bnx2 device in roughly the reverse
 * order of bnx2_init_one().  Statement order is significant here: queued
 * work must finish before the netdev is unregistered, and bp (which lives
 * inside dev's private area) must not be touched after free_netdev().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any scheduled work (e.g. the reset task set up in
	 * bnx2_init_board via INIT_WORK) to complete.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	/* bp is part of dev's private data and is invalid after this call. */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6071
/* PCI suspend callback: quiesce the NIC and drop it into the requested
 * low-power state.  The firmware reset code is chosen from the WOL
 * configuration: link down when WOL is unsupported (NO_WOL_FLAG),
 * otherwise suspend with or without wake-up enabled.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Interface is down: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset task finish before stopping the device. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	/* Rings are re-populated by bnx2_init_nic() on resume. */
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6097
/* PCI resume callback: restore full power (D0) and reinitialize the NIC.
 * Mirrors bnx2_suspend(); a device that was not running at suspend time
 * needs no action here either.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6113
/* PCI driver glue: probe/remove and power-management entry points,
 * matched against devices listed in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6122
6123static int __init bnx2_init(void)
6124{
Jeff Garzik29917622006-08-19 17:48:59 -04006125 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006126}
6127
/* Module exit point: unregister the driver, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6132
6133module_init(bnx2_init);
6134module_exit(bnx2_cleanup);
6135
6136
6137