blob: ca5acc4736df2bd80134e1663464fd6ae4cd51fe [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080056#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070057
/* Driver identity strings; PFX prefixes all printk messages from this driver. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.3"
#define DRV_MODULE_RELDATE	"January 8, 2007"

/* Helper for scheduling timers relative to now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board identifiers; used as the driver_data index into board_info below,
 * so the two lists must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};

/* PCI IDs this driver binds to.  HP OEM subsystem IDs must precede the
 * catch-all PCI_ANY_ID entries for the same device so they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
125
/* NVRAM device table.  Each entry describes one supported EEPROM/flash
 * part; entries are presumably matched against the device's NVRAM
 * strapping value at probe time — field meanings are defined by
 * struct flash_spec in bnx2.h (TODO: confirm against bnx2_init_nvram).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
212
213MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
/* Return the number of free tx descriptors.
 * The smp_mb() presumably pairs with barriers in the tx/tx-completion
 * paths so that a stale tx_prod/tx_cons pair cannot be used to re-enable
 * the queue prematurely — TODO confirm against the tx path.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
232
/* Indirect register read: select @offset through the PCICFG register
 * window, then read back through the window data register.
 * NOTE(review): the window is a shared resource; callers appear to be
 * responsible for serialization — confirm locking at call sites.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
239
/* Indirect register write: select @offset through the PCICFG register
 * window, then write @val through the window data register.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
246
247static void
248bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
249{
250 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800251 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
252 int i;
253
254 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
255 REG_WR(bp, BNX2_CTX_CTX_CTRL,
256 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
257 for (i = 0; i < 5; i++) {
258 u32 val;
259 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
260 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
261 break;
262 udelay(5);
263 }
264 } else {
265 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
266 REG_WR(bp, BNX2_CTX_DATA, val);
267 }
Michael Chanb6016b72005-05-26 13:03:09 -0700268}
269
/* Read PHY register @reg over the MDIO bus into *@val.
 * Returns 0 on success, -EBUSY if the MDIO transaction does not
 * complete (in which case *@val is zeroed).
 *
 * If the EMAC is auto-polling the PHY, auto-poll is temporarily
 * disabled around the manual transaction and re-enabled afterwards;
 * the dummy REG_RD after each mode write flushes the posted write and
 * the udelay(40) lets the MDIO state machine settle.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO read command: PHY address, register, start bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us = 500us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
326
/* Write @val to PHY register @reg over the MDIO bus.
 * Returns 0 on success, -EBUSY if the transaction does not complete.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * MDIO transaction and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO write command with the data in the low bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us = 500us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
375
/* Mask chip interrupts.  The read-back flushes the posted PCI write so
 * the mask is guaranteed to have reached the device before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
383
/* Unmask chip interrupts and ack up to the last seen status index.
 * The two-step write sequence (ack with MASK_INT set, then ack with
 * interrupts enabled) is intentional — do not collapse it.  The final
 * COAL_NOW kick forces the host coalescing block to generate an
 * interrupt immediately if any events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
396
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable in
 * progress; the matching decrement happens in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
404
/* Quiesce the data path: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the tx
 * watchdog does not fire while the queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
415
416static void
417bnx2_netif_start(struct bnx2 *bp)
418{
419 if (atomic_dec_and_test(&bp->intr_sem)) {
420 if (netif_running(bp->dev)) {
421 netif_wake_queue(bp->dev);
422 netif_poll_enable(bp->dev);
423 bnx2_enable_int(bp);
424 }
425 }
426}
427
/* Release all DMA and host memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially allocated state (used as the error-path
 * cleanup): every pointer is checked and NULLed after freeing.
 * stats_blk shares the status block allocation, so it is only cleared,
 * never freed separately.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709-only context pages. */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;	/* carved out of status_blk */
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree(NULL)/vfree(NULL) are no-ops, so no guard needed. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
466
/* Allocate all host and DMA memory for the device: tx/rx rings and
 * their shadow buffers, the combined status+statistics block, and the
 * 5709 context pages.  Returns 0 or -ENOMEM; on any failure the
 * partially allocated state is torn down via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx shadow ring can be large (rx_max_ring pages), so vmalloc. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc does not zero; clear explicitly. */
	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives right after the cache-aligned status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8kB of context memory, split into BCM_PAGE_SIZE chunks. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
537
/* Publish the current link state (speed/duplex/autoneg status) to the
 * bootcode via the shared memory LINK_STATUS word, so management
 * firmware sees the same link the driver does.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down; read twice for current state. */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
593
/* Update carrier state and log the new link status, then mirror it to
 * the firmware.  The message is built from multiple printk fragments,
 * so their order (and the trailing newline) must be preserved.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
628
/* Resolve the negotiated rx/tx pause configuration into bp->flow_ctrl.
 * If flow control is not being autonegotiated, the requested setting is
 * applied directly (full duplex only).  The 5708 SerDes reports the
 * resolved result in a status register; otherwise the result is derived
 * from the local/remote pause advertisements per IEEE 802.3ab.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful in full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: hardware already resolved pause; just read it. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* Translate 1000Base-X pause bits into the copper encoding so the
	 * resolution logic below can be shared.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
704
/* Fill in bp->line_speed and bp->duplex for a 5708 SerDes link from the
 * PHY's resolved 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
733
734static int
735bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700736{
737 u32 bmcr, local_adv, remote_adv, common;
738
739 bp->link_up = 1;
740 bp->line_speed = SPEED_1000;
741
742 bnx2_read_phy(bp, MII_BMCR, &bmcr);
743 if (bmcr & BMCR_FULLDPLX) {
744 bp->duplex = DUPLEX_FULL;
745 }
746 else {
747 bp->duplex = DUPLEX_HALF;
748 }
749
750 if (!(bmcr & BMCR_ANENABLE)) {
751 return 0;
752 }
753
754 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
755 bnx2_read_phy(bp, MII_LPA, &remote_adv);
756
757 common = local_adv & remote_adv;
758 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
759
760 if (common & ADVERTISE_1000XFULL) {
761 bp->duplex = DUPLEX_FULL;
762 }
763 else {
764 bp->duplex = DUPLEX_HALF;
765 }
766 }
767
768 return 0;
769}
770
771static int
772bnx2_copper_linkup(struct bnx2 *bp)
773{
774 u32 bmcr;
775
776 bnx2_read_phy(bp, MII_BMCR, &bmcr);
777 if (bmcr & BMCR_ANENABLE) {
778 u32 local_adv, remote_adv, common;
779
780 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
781 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
782
783 common = local_adv & (remote_adv >> 2);
784 if (common & ADVERTISE_1000FULL) {
785 bp->line_speed = SPEED_1000;
786 bp->duplex = DUPLEX_FULL;
787 }
788 else if (common & ADVERTISE_1000HALF) {
789 bp->line_speed = SPEED_1000;
790 bp->duplex = DUPLEX_HALF;
791 }
792 else {
793 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
794 bnx2_read_phy(bp, MII_LPA, &remote_adv);
795
796 common = local_adv & remote_adv;
797 if (common & ADVERTISE_100FULL) {
798 bp->line_speed = SPEED_100;
799 bp->duplex = DUPLEX_FULL;
800 }
801 else if (common & ADVERTISE_100HALF) {
802 bp->line_speed = SPEED_100;
803 bp->duplex = DUPLEX_HALF;
804 }
805 else if (common & ADVERTISE_10FULL) {
806 bp->line_speed = SPEED_10;
807 bp->duplex = DUPLEX_FULL;
808 }
809 else if (common & ADVERTISE_10HALF) {
810 bp->line_speed = SPEED_10;
811 bp->duplex = DUPLEX_HALF;
812 }
813 else {
814 bp->line_speed = 0;
815 bp->link_up = 0;
816 }
817 }
818 }
819 else {
820 if (bmcr & BMCR_SPEED100) {
821 bp->line_speed = SPEED_100;
822 }
823 else {
824 bp->line_speed = SPEED_10;
825 }
826 if (bmcr & BMCR_FULLDPLX) {
827 bp->duplex = DUPLEX_FULL;
828 }
829 else {
830 bp->duplex = DUPLEX_HALF;
831 }
832 }
833
834 return 0;
835}
836
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default tx IPG; 1000HD needs a larger value (0x26ff). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			/* Only chips newer than 5706 have a 10M MII mode. */
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through: 2.5G also uses GMII port mode */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
903
/* Re-evaluate link state after a link-change event: read BMSR, decode
 * speed/duplex via the chip-specific linkup helper, resolve flow
 * control, report transitions, and reprogram the MAC.  In loopback
 * modes the link is simply forced up.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice for the current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: trust the EMAC link status over the PHY's BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down with SerDes autoneg: drop any forced 2.5G mode
		 * and make sure autonegotiation is (re-)enabled.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
970
/* Soft-reset the PHY via BMCR and poll (up to 100 x 10us) for the
 * self-clearing reset bit to drop.  Returns 0 on success, -EBUSY if
 * the PHY never comes out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Small settle delay after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
994
995static u32
996bnx2_phy_get_pause_adv(struct bnx2 *bp)
997{
998 u32 adv = 0;
999
1000 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1001 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1002
1003 if (bp->phy_flags & PHY_SERDES_FLAG) {
1004 adv = ADVERTISE_1000XPAUSE;
1005 }
1006 else {
1007 adv = ADVERTISE_PAUSE_CAP;
1008 }
1009 }
1010 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1011 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012 adv = ADVERTISE_1000XPSE_ASYM;
1013 }
1014 else {
1015 adv = ADVERTISE_PAUSE_ASYM;
1016 }
1017 }
1018 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1019 if (bp->phy_flags & PHY_SERDES_FLAG) {
1020 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1021 }
1022 else {
1023 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1024 }
1025 }
1026 return adv;
1027}
1028
/* Configure the SerDes PHY according to the requested settings in *bp.
 *
 * Two paths:
 *  - forced speed (autoneg disabled): program BMCR/ADVERTISE directly,
 *    handling the 5708's 2.5G force bit, and bounce the link if anything
 *    changed so the partner sees the transition;
 *  - autoneg: build a fresh advertisement and restart AN, arming a short
 *    timer to parallel-detect non-negotiating partners.
 *
 * Called with bp->phy_lock held (it is dropped briefly around msleep()).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* 2.5G force requires the UP1 2G5 enable bit; if we
			 * had to flip it, bounce the link below.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Leaving 2.5G mode on a 5708: clear the enable bit. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart AN so
				 * the partner drops link, then apply the
				 * forced mode and report the state change.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Allow the PHY to negotiate up to 2.5G. */
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock must be dropped across msleep(). */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1132
/* Speed/duplex advertisement masks.
 *
 * The ETHTOOL_* masks are in ethtool ADVERTISED_* encoding; fibre links
 * only support 1000-full here.  The PHY_* masks are in MII register
 * (ADVERTISE_*) encoding for the ADVERTISE and CTRL1000 registers.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1145
/* Configure the copper PHY according to the requested settings in *bp.
 *
 * Autoneg path: rebuild the ADVERTISE/CTRL1000 registers from
 * bp->advertising and restart AN only if something actually changed;
 * otherwise just re-resolve flow control in case it switched between
 * auto and forced.
 *
 * Forced path: program BMCR, bouncing the link first (loopback + delay)
 * so the partner sees the change.  If the link never dropped, apply the
 * new speed/duplex to the MAC immediately.
 *
 * Called with bp->phy_lock held (dropped briefly around msleep()).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage; preserve nothing else. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice for the
		 * current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock must be dropped across msleep(). */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1239
1240static int
1241bnx2_setup_phy(struct bnx2 *bp)
1242{
1243 if (bp->loopback == MAC_LOOPBACK)
1244 return 0;
1245
1246 if (bp->phy_flags & PHY_SERDES_FLAG) {
1247 return (bnx2_setup_serdes_phy(bp));
1248 }
1249 else {
1250 return (bnx2_setup_copper_phy(bp));
1251 }
1252}
1253
/* One-time init of the 5708 SerDes PHY.
 *
 * Selects IEEE-compatible signalling, enables fiber mode with
 * auto-detect, enables parallel detect, optionally enables 2.5G,
 * applies a TX-amplitude workaround on A0/B0/B1 silicon, and programs
 * a board-specific TX control value from shared memory for backplane
 * designs.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* The 5708S PHY registers are banked; BLK_ADDR selects the bank. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value stashed in shared memory by the
	 * bootcode; only applied on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1307
/* One-time init of the 5706 SerDes PHY.
 *
 * Clears parallel-detect state, applies a GP HW control tweak on 5706,
 * and programs jumbo/standard packet-length settings through the PHY's
 * shadow registers (0x18/0x1c expander access).  The 0x18/0x1c constants
 * are undocumented Broadcom shadow-register values — do not alter them.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1342
/* One-time init of the copper PHY.
 *
 * Applies a CRC workaround sequence when PHY_CRC_FIX_FLAG is set,
 * programs extended packet length for jumbo MTU, and enables
 * ethernet@wirespeed (link at reduced speed on marginal cabling).
 * The register 0x15/0x17/0x18 values are undocumented Broadcom
 * shadow-register sequences — do not alter them.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor workaround sequence via expander registers. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1383
1384
/* Full PHY bring-up: select link-ready interrupt mode, enable the EMAC
 * link attention, reset the PHY, read its ID, then run the chip-specific
 * init routine (5706S/5708S SerDes or copper) and finally program the
 * requested link settings via bnx2_setup_phy().
 *
 * Returns the chip-specific init routine's result (0 on success).
 * NOTE(review): return codes from bnx2_reset_phy()/bnx2_setup_phy() are
 * intentionally not propagated here.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID is split across the two MII PHYSID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1417
1418static int
1419bnx2_set_mac_loopback(struct bnx2 *bp)
1420{
1421 u32 mac_mode;
1422
1423 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1424 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1425 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1426 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1427 bp->link_up = 1;
1428 return 0;
1429}
1430
Michael Chanbc5a0692006-01-23 16:13:22 -08001431static int bnx2_test_link(struct bnx2 *);
1432
/* Put the PHY into loopback (1000 Mbps full duplex) for the loopback
 * self-test, wait up to ~1s for the internal link to come up, then
 * program the EMAC for GMII with loopback/force bits cleared.
 *
 * Returns 0 on success or the bnx2_write_phy() error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* phy_lock protects the MII register access. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1462
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for the matching ACK sequence number.
 *
 * @msg_data: message code/data; the incremented driver sequence number
 *            is OR'ed in before posting.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success (or when no wait was requested via
 * BNX2_DRV_MSG_DATA_WAIT0), -EBUSY on ACK timeout (the firmware is
 * additionally informed of the timeout), -EIO on a non-OK firmware
 * status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Caller did not ask to wait for completion. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1505
/* Program the 5709's host-page-table based context memory: enable the
 * context engine with the host page size, then write each context block's
 * DMA address into the page table and poll for the write request to
 * complete.
 *
 * Returns 0 on success, -EBUSY if a page-table write does not complete
 * within 10 x 5us.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Encode the host page size (relative to 256 bytes) in bits 16+. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the 64-bit DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write-request bit. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1539
/* Zero-initialize the on-chip context memory for all 96 connection IDs
 * (used on pre-5709 chips).
 *
 * On 5706 A0 silicon a remapping of certain context IDs is applied
 * (hardware workaround); on later chips the virtual and physical context
 * addresses are identical.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 workaround: remap CIDs with bit 3 set. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1580
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the chip's pool, keep the good ones (bit 9 clear in the returned
 * value marks a good block), and free only those back — permanently
 * removing the bad blocks from circulation.
 *
 * Returns 0 on success, -ENOMEM if the temporary host array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough for the chip's whole mbuf pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1631
1632static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001633bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001634{
1635 u32 val;
1636 u8 *mac_addr = bp->dev->dev_addr;
1637
1638 val = (mac_addr[0] << 8) | mac_addr[1];
1639
1640 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1641
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001642 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001643 (mac_addr[4] << 8) | mac_addr[5];
1644
1645 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1646}
1647
/* Allocate and DMA-map a fresh receive skb for RX ring slot @index, and
 * fill in the corresponding hardware rx_bd with the mapping address.
 * Advances rx_prod_bseq by the buffer size (hardware byte-sequence
 * accounting).
 *
 * Returns 0 on success, -ENOMEM if skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* 64-bit DMA address split across the descriptor's two words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1678
1679static void
1680bnx2_phy_int(struct bnx2 *bp)
1681{
1682 u32 new_link_state, old_link_state;
1683
1684 new_link_state = bp->status_blk->status_attn_bits &
1685 STATUS_ATTN_BITS_LINK_STATE;
1686 old_link_state = bp->status_blk->status_attn_bits_ack &
1687 STATUS_ATTN_BITS_LINK_STATE;
1688 if (new_link_state != old_link_state) {
1689 if (new_link_state) {
1690 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1691 STATUS_ATTN_BITS_LINK_STATE);
1692 }
1693 else {
1694 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1695 STATUS_ATTN_BITS_LINK_STATE);
1696 }
1697 bnx2_set_link(bp);
1698 }
1699}
1700
/* Reclaim completed TX descriptors up to the hardware consumer index
 * from the status block: unmap the head and all fragment pages, free the
 * skb, then wake the queue if it was stopped and enough descriptors are
 * now free.  Runs lockless against bnx2_start_xmit(); ordering is
 * enforced with smp_mb() and the double-checked wake under netif_tx_lock.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The index skips the ring's next-pointer slot at the page end. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD is not done yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the consumer index to pick up new completions. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid a wake/stop race. */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1788
/* Recycle an RX skb: move it (and its DMA mapping/descriptor address)
 * from ring slot @cons to producer slot @prod instead of allocating a
 * new buffer.  Used on receive errors and when a packet was copied out.
 * Advances rx_prod_bseq by the buffer size.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (CPU-synced) header area back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1818
/* NAPI receive processing: consume up to @budget packets between the
 * software and hardware consumer indices.
 *
 * For each packet: sync the header for CPU access, drop frames with
 * hardware-detected errors (recycling the buffer), copy small packets
 * into a fresh skb when running with jumbo MTU, otherwise hand the skb
 * up and replenish the slot, apply hardware checksum results, and pass
 * the frame to the stack (with VLAN acceleration when available).
 * Finally posts the new producer index/byte sequence to the chip.
 *
 * Returns the number of packets processed (<= budget).
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The index skips the ring's next-pointer slot at the page end. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header + small-packet region. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip CRC */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Recycle the original buffer back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1968
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache with the status block before the poll runs. */
	prefetch(bp->status_blk);
	/* Ack and mask chip interrupts; bnx2_poll() re-enables them
	 * once all work has been processed.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Schedule the NAPI poll routine (bnx2_poll). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1991
/* INTx ISR.  Unlike the MSI ISR, this must detect whether the shared
 * interrupt line actually belongs to this device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Ack and mask chip interrupts until the poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2021
Michael Chanf4e418f2005-11-04 08:53:48 -08002022static inline int
2023bnx2_has_work(struct bnx2 *bp)
2024{
2025 struct status_block *sblk = bp->status_blk;
2026
2027 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2028 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2029 return 1;
2030
2031 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2032 bp->link_up)
2033 return 1;
2034
2035 return 0;
2036}
2037
/* NAPI poll routine.  Services link attention, TX completions, and up
 * to *budget RX packets, then re-enables interrupts when no work
 * remains.  Returns 1 to stay on the poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention: the attn bit differs from its ack bit until
	 * serviced by bnx2_phy_int().
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index BEFORE re-checking for work so a
	 * status block update after the check raises a new interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack with the last seen index re-enables
		 * interrupts.
		 */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack with MASK_INT first, then unmask, to avoid
		 * a spurious shared-line assertion in between.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2099
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits re-derived below
	 * cleared.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (do not strip) VLAN tags only when no vlan group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address to one of 256 bits spread
			 * across the 8 32-bit hash registers.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2174
Michael Chanfba9fe92006-06-12 22:21:25 -07002175#define FW_BUF_SIZE 0x8000
2176
2177static int
2178bnx2_gunzip_init(struct bnx2 *bp)
2179{
2180 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2181 goto gunzip_nomem1;
2182
2183 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2184 goto gunzip_nomem2;
2185
2186 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2187 if (bp->strm->workspace == NULL)
2188 goto gunzip_nomem3;
2189
2190 return 0;
2191
2192gunzip_nomem3:
2193 kfree(bp->strm);
2194 bp->strm = NULL;
2195
2196gunzip_nomem2:
2197 vfree(bp->gunzip_buf);
2198 bp->gunzip_buf = NULL;
2199
2200gunzip_nomem1:
2201 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2202 "uncompression.\n", bp->dev->name);
2203 return -ENOMEM;
2204}
2205
2206static void
2207bnx2_gunzip_end(struct bnx2 *bp)
2208{
2209 kfree(bp->strm->workspace);
2210
2211 kfree(bp->strm);
2212 bp->strm = NULL;
2213
2214 if (bp->gunzip_buf) {
2215 vfree(bp->gunzip_buf);
2216 bp->gunzip_buf = NULL;
2217 }
2218}
2219
2220static int
2221bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2222{
2223 int n, rc;
2224
2225 /* check gzip header */
2226 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2227 return -EINVAL;
2228
2229 n = 10;
2230
2231#define FNAME 0x8
2232 if (zbuf[3] & FNAME)
2233 while ((zbuf[n++] != 0) && (n < len));
2234
2235 bp->strm->next_in = zbuf + n;
2236 bp->strm->avail_in = len - n;
2237 bp->strm->next_out = bp->gunzip_buf;
2238 bp->strm->avail_out = FW_BUF_SIZE;
2239
2240 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2241 if (rc != Z_OK)
2242 return rc;
2243
2244 rc = zlib_inflate(bp->strm, Z_FINISH);
2245
2246 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2247 *outbuf = bp->gunzip_buf;
2248
2249 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2250 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2251 bp->dev->name, bp->strm->msg);
2252
2253 zlib_inflateEnd(bp->strm);
2254
2255 if (rc == Z_STREAM_END)
2256 return 0;
2257
2258 return rc;
2259}
2260
/* Download microcode into one of the two RV2P processors.
 * rv2p_code_len is in bytes; each instruction is a pair of 32-bit
 * words written through the INSTR_HIGH/INSTR_LOW register pair and
 * committed at index i/8 via the ADDR_CMD register.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* Code words are converted to little-endian on the way
		 * into the chip.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2293
Michael Chanaf3ee512006-11-19 14:09:25 -08002294static int
Michael Chanb6016b72005-05-26 13:03:09 -07002295load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2296{
2297 u32 offset;
2298 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002299 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002300
2301 /* Halt the CPU. */
2302 val = REG_RD_IND(bp, cpu_reg->mode);
2303 val |= cpu_reg->mode_value_halt;
2304 REG_WR_IND(bp, cpu_reg->mode, val);
2305 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2306
2307 /* Load the Text area. */
2308 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002309 if (fw->gz_text) {
2310 u32 text_len;
2311 void *text;
2312
2313 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2314 &text_len);
2315 if (rc)
2316 return rc;
2317
2318 fw->text = text;
2319 }
2320 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002321 int j;
2322
2323 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002324 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002325 }
2326 }
2327
2328 /* Load the Data area. */
2329 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2330 if (fw->data) {
2331 int j;
2332
2333 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2334 REG_WR_IND(bp, offset, fw->data[j]);
2335 }
2336 }
2337
2338 /* Load the SBSS area. */
2339 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2340 if (fw->sbss) {
2341 int j;
2342
2343 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2344 REG_WR_IND(bp, offset, fw->sbss[j]);
2345 }
2346 }
2347
2348 /* Load the BSS area. */
2349 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2350 if (fw->bss) {
2351 int j;
2352
2353 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2354 REG_WR_IND(bp, offset, fw->bss[j]);
2355 }
2356 }
2357
2358 /* Load the Read-Only area. */
2359 offset = cpu_reg->spad_base +
2360 (fw->rodata_addr - cpu_reg->mips_view_base);
2361 if (fw->rodata) {
2362 int j;
2363
2364 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2365 REG_WR_IND(bp, offset, fw->rodata[j]);
2366 }
2367 }
2368
2369 /* Clear the pre-fetch instruction. */
2370 REG_WR_IND(bp, cpu_reg->inst, 0);
2371 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2372
2373 /* Start the CPU. */
2374 val = REG_RD_IND(bp, cpu_reg->mode);
2375 val &= ~cpu_reg->mode_value_halt;
2376 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2377 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002378
2379 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002380}
2381
/* Download firmware to all on-chip processors: both RV2P engines and
 * the RXP, TXP, TPAT, COM (and, on 5709, CP) RISC CPUs.  The images
 * are stored gzip-compressed (bnx2_fw.h / bnx2_fw2.h) and are
 * decompressed through the scratch buffer set up by bnx2_gunzip_init().
 *
 * Returns 0 on success or a negative error from the helpers.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709 uses the second-generation firmware set. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
	/* Reached on success and failure: free the gunzip scratch state. */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2526
/* Transition the device between PCI power states.  Only PCI_D0 (full
 * power) and PCI_D3hot (suspend, optionally with Wake-on-LAN armed)
 * are handled; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Leave magic-packet wake mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WOL
			 * link, then restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WOL is armed. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2653
2654static int
2655bnx2_acquire_nvram_lock(struct bnx2 *bp)
2656{
2657 u32 val;
2658 int j;
2659
2660 /* Request access to the flash interface. */
2661 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2665 break;
2666
2667 udelay(5);
2668 }
2669
2670 if (j >= NVRAM_TIMEOUT_COUNT)
2671 return -EBUSY;
2672
2673 return 0;
2674}
2675
2676static int
2677bnx2_release_nvram_lock(struct bnx2 *bp)
2678{
2679 int j;
2680 u32 val;
2681
2682 /* Relinquish nvram interface. */
2683 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2684
2685 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2688 break;
2689
2690 udelay(5);
2691 }
2692
2693 if (j >= NVRAM_TIMEOUT_COUNT)
2694 return -EBUSY;
2695
2696 return 0;
2697}
2698
2699
2700static int
2701bnx2_enable_nvram_write(struct bnx2 *bp)
2702{
2703 u32 val;
2704
2705 val = REG_RD(bp, BNX2_MISC_CFG);
2706 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2707
2708 if (!bp->flash_info->buffered) {
2709 int j;
2710
2711 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712 REG_WR(bp, BNX2_NVM_COMMAND,
2713 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2714
2715 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2716 udelay(5);
2717
2718 val = REG_RD(bp, BNX2_NVM_COMMAND);
2719 if (val & BNX2_NVM_COMMAND_DONE)
2720 break;
2721 }
2722
2723 if (j >= NVRAM_TIMEOUT_COUNT)
2724 return -EBUSY;
2725 }
2726 return 0;
2727}
2728
2729static void
2730bnx2_disable_nvram_write(struct bnx2 *bp)
2731{
2732 u32 val;
2733
2734 val = REG_RD(bp, BNX2_MISC_CFG);
2735 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2736}
2737
2738
2739static void
2740bnx2_enable_nvram_access(struct bnx2 *bp)
2741{
2742 u32 val;
2743
2744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002747 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2748}
2749
2750static void
2751bnx2_disable_nvram_access(struct bnx2 *bp)
2752{
2753 u32 val;
2754
2755 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002757 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002758 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2760}
2761
2762static int
2763bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2764{
2765 u32 cmd;
2766 int j;
2767
2768 if (bp->flash_info->buffered)
2769 /* Buffered flash, no erase needed */
2770 return 0;
2771
2772 /* Build an erase command */
2773 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774 BNX2_NVM_COMMAND_DOIT;
2775
2776 /* Need to clear DONE bit separately. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2778
2779 /* Address of the NVRAM to read from. */
2780 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2781
2782 /* Issue an erase command. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2784
2785 /* Wait for completion. */
2786 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2787 u32 val;
2788
2789 udelay(5);
2790
2791 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792 if (val & BNX2_NVM_COMMAND_DONE)
2793 break;
2794 }
2795
2796 if (j >= NVRAM_TIMEOUT_COUNT)
2797 return -EBUSY;
2798
2799 return 0;
2800}
2801
/* Read one 32-bit word from NVRAM at byte offset 'offset' into
 * ret_val (4 bytes).  cmd_flags may carry FIRST/LAST framing bits for
 * multi-word sequences.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered flash addresses are (page << page_bits) +
		 * offset-within-page, not a flat byte offset.
		 */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* NVRAM data is big-endian; convert before
			 * copying out.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2847
2848
/* Write one 32-bit word (4 bytes at 'val') to NVRAM at byte offset
 * 'offset'.  cmd_flags may carry FIRST/LAST framing bits.  Returns 0
 * on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered flash addresses are (page << page_bits) +
		 * offset-within-page, not a flat byte offset.
		 */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* NVRAM stores data big-endian. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2892
/* Identify the NVRAM (flash/EEPROM) part attached to the controller and
 * cache its descriptor in bp->flash_info and its size in bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if the strapping matches no entry in
 * flash_table, or an error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		/* Match on config1 because the interface was already
		 * reprogrammed by an earlier pass through this code. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which set of strapping pins to decode. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched in whichever loop ran above. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised by shared firmware config;
	 * fall back to the table's total_size when that field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2970
2971static int
2972bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2973 int buf_size)
2974{
2975 int rc = 0;
2976 u32 cmd_flags, offset32, len32, extra;
2977
2978 if (buf_size == 0)
2979 return 0;
2980
2981 /* Request access to the flash interface. */
2982 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983 return rc;
2984
2985 /* Enable access to flash interface */
2986 bnx2_enable_nvram_access(bp);
2987
2988 len32 = buf_size;
2989 offset32 = offset;
2990 extra = 0;
2991
2992 cmd_flags = 0;
2993
2994 if (offset32 & 3) {
2995 u8 buf[4];
2996 u32 pre_len;
2997
2998 offset32 &= ~3;
2999 pre_len = 4 - (offset & 3);
3000
3001 if (pre_len >= len32) {
3002 pre_len = len32;
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004 BNX2_NVM_COMMAND_LAST;
3005 }
3006 else {
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3008 }
3009
3010 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3011
3012 if (rc)
3013 return rc;
3014
3015 memcpy(ret_buf, buf + (offset & 3), pre_len);
3016
3017 offset32 += 4;
3018 ret_buf += pre_len;
3019 len32 -= pre_len;
3020 }
3021 if (len32 & 3) {
3022 extra = 4 - (len32 & 3);
3023 len32 = (len32 + 4) & ~3;
3024 }
3025
3026 if (len32 == 4) {
3027 u8 buf[4];
3028
3029 if (cmd_flags)
3030 cmd_flags = BNX2_NVM_COMMAND_LAST;
3031 else
3032 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033 BNX2_NVM_COMMAND_LAST;
3034
3035 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3036
3037 memcpy(ret_buf, buf, 4 - extra);
3038 }
3039 else if (len32 > 0) {
3040 u8 buf[4];
3041
3042 /* Read the first word. */
3043 if (cmd_flags)
3044 cmd_flags = 0;
3045 else
3046 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3047
3048 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3049
3050 /* Advance to the next dword. */
3051 offset32 += 4;
3052 ret_buf += 4;
3053 len32 -= 4;
3054
3055 while (len32 > 4 && rc == 0) {
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3057
3058 /* Advance to the next dword. */
3059 offset32 += 4;
3060 ret_buf += 4;
3061 len32 -= 4;
3062 }
3063
3064 if (rc)
3065 return rc;
3066
3067 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3069
3070 memcpy(ret_buf, buf, 4 - extra);
3071 }
3072
3073 /* Disable access to flash interface */
3074 bnx2_disable_nvram_access(bp);
3075
3076 bnx2_release_nvram_lock(bp);
3077
3078 return rc;
3079}
3080
/* Write @buf_size bytes from @data_buf to NVRAM starting at @offset.
 *
 * Handles unaligned offsets/lengths by read-modify-writing the partial
 * dwords at the edges, and handles non-buffered flash by reading,
 * erasing and rewriting whole pages.  Works page by page, taking and
 * releasing the NVRAM lock around each page.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths inside the page loop jump to
 * nvram_write_end after bnx2_acquire_nvram_lock() has succeeded, so on
 * failure the NVRAM lock appears to be left held and flash access left
 * enabled -- confirm and consider releasing the lock on those paths.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen to the enclosing dword and fetch the
	 * existing leading bytes so they can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += (4 - align_start);
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Ragged end: widen to a whole dword and fetch the existing
	 * trailing bytes (unless the single-dword case above already
	 * covers it). */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	/* Build a dword-aligned shadow of the data: saved edge bytes
	 * around the caller's payload. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash must be erased per page, so a scratch
	 * buffer (264 bytes covers the largest page) holds the page
	 * contents across the erase. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Mark the last dword of the sequence: end of page
			 * (non-buffered) or end of data (buffered). */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3262
3263static int
3264bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3265{
3266 u32 val;
3267 int i, rc = 0;
3268
3269 /* Wait for the current PCI transaction to complete before
3270 * issuing a reset. */
3271 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3272 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3273 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3274 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3276 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3277 udelay(5);
3278
Michael Chanb090ae22006-01-23 16:07:10 -08003279 /* Wait for the firmware to tell us it is ok to issue a reset. */
3280 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3281
Michael Chanb6016b72005-05-26 13:03:09 -07003282 /* Deposit a driver reset signature so the firmware knows that
3283 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003285 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3286
Michael Chanb6016b72005-05-26 13:03:09 -07003287 /* Do a dummy read to force the chip to complete all current transaction
3288 * before we issue a reset. */
3289 val = REG_RD(bp, BNX2_MISC_ID);
3290
Michael Chan234754d2006-11-19 14:11:41 -08003291 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3292 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3293 REG_RD(bp, BNX2_MISC_COMMAND);
3294 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003295
Michael Chan234754d2006-11-19 14:11:41 -08003296 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3297 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003298
Michael Chan234754d2006-11-19 14:11:41 -08003299 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003300
Michael Chan234754d2006-11-19 14:11:41 -08003301 } else {
3302 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3303 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3304 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3305
3306 /* Chip reset. */
3307 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3308
3309 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3310 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3311 current->state = TASK_UNINTERRUPTIBLE;
3312 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003313 }
Michael Chanb6016b72005-05-26 13:03:09 -07003314
Michael Chan234754d2006-11-19 14:11:41 -08003315 /* Reset takes approximate 30 usec */
3316 for (i = 0; i < 10; i++) {
3317 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3318 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3319 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3320 break;
3321 udelay(10);
3322 }
3323
3324 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3325 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3326 printk(KERN_ERR PFX "Chip reset did not complete\n");
3327 return -EBUSY;
3328 }
Michael Chanb6016b72005-05-26 13:03:09 -07003329 }
3330
3331 /* Make sure byte swapping is properly configured. */
3332 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3333 if (val != 0x01020304) {
3334 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3335 return -ENODEV;
3336 }
3337
Michael Chanb6016b72005-05-26 13:03:09 -07003338 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003339 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3340 if (rc)
3341 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003342
3343 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3344 /* Adjust the voltage regular to two steps lower. The default
3345 * of this register is 0x0000000e. */
3346 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3347
3348 /* Remove bad rbuf memory from the free pool. */
3349 rc = bnx2_alloc_bad_rbuf(bp);
3350 }
3351
3352 return rc;
3353}
3354
/* Program the chip after a reset: DMA configuration, context memory,
 * on-chip CPUs, MAC address, MTU, host-coalescing parameters and the
 * receive filter, then hand control back to the firmware.
 *
 * Returns 0 on success or an error from bnx2_init_cpus() /
 * bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, turn off relaxed ordering in the PCI-X command word. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: interrupt value in the high half,
	 * non-interrupt value in the low half of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 workaround: timer mode is unusable, stats only. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3519
Michael Chan59b47d82006-11-19 14:10:45 -08003520static void
3521bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3522{
3523 u32 val, offset0, offset1, offset2, offset3;
3524
3525 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3526 offset0 = BNX2_L2CTX_TYPE_XI;
3527 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3528 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3529 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3530 } else {
3531 offset0 = BNX2_L2CTX_TYPE;
3532 offset1 = BNX2_L2CTX_CMD_TYPE;
3533 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3534 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3535 }
3536 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3537 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3538
3539 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3540 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3541
3542 val = (u64) bp->tx_desc_mapping >> 32;
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3544
3545 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3546 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3547}
Michael Chanb6016b72005-05-26 13:03:09 -07003548
3549static void
3550bnx2_init_tx_ring(struct bnx2 *bp)
3551{
3552 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003553 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003554
Michael Chan2f8af122006-08-15 01:39:10 -07003555 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3556
Michael Chanb6016b72005-05-26 13:03:09 -07003557 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003558
Michael Chanb6016b72005-05-26 13:03:09 -07003559 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3560 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3561
3562 bp->tx_prod = 0;
3563 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003564 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003565 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003566
Michael Chan59b47d82006-11-19 14:10:45 -08003567 cid = TX_CID;
3568 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3569 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003570
Michael Chan59b47d82006-11-19 14:10:45 -08003571 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003572}
3573
/* Initialize the RX descriptor rings: chain the ring pages together,
 * program the RX context, pre-post receive buffers, and publish the
 * initial producer index to the hardware. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every page of descriptors; the last BD of each page is a
	 * chain BD pointing at the next page (the final page wraps back
	 * to page 0). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the ring's base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-post rx_ring_size receive buffers; stop early if an
	 * allocation fails (the ring simply starts with fewer buffers). */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3633
3634static void
Michael Chan13daffa2006-03-20 17:49:20 -08003635bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3636{
3637 u32 num_rings, max;
3638
3639 bp->rx_ring_size = size;
3640 num_rings = 1;
3641 while (size > MAX_RX_DESC_CNT) {
3642 size -= MAX_RX_DESC_CNT;
3643 num_rings++;
3644 }
3645 /* round to next power of 2 */
3646 max = MAX_RX_RINGS;
3647 while ((max & num_rings) == 0)
3648 max >>= 1;
3649
3650 if (num_rings != max)
3651 max <<= 1;
3652
3653 bp->rx_max_ring = max;
3654 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3655}
3656
/* Unmap and free every sk_buff still attached to the TX ring.
 * Frame data was DMA-mapped at transmit time, so each head and fragment
 * descriptor must be unmapped before its skb is released. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	/* Ring not allocated (e.g. device never opened) -- nothing to do. */
	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head descriptor maps the skb's linear data. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Each page fragment occupies one following descriptor. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip over the head descriptor plus its fragments. */
		i += j + 1;
	}

}
3693
3694static void
3695bnx2_free_rx_skbs(struct bnx2 *bp)
3696{
3697 int i;
3698
3699 if (bp->rx_buf_ring == NULL)
3700 return;
3701
Michael Chan13daffa2006-03-20 17:49:20 -08003702 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003703 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3704 struct sk_buff *skb = rx_buf->skb;
3705
Michael Chan05d0f1c2005-11-04 08:53:48 -08003706 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003707 continue;
3708
3709 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3710 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3711
3712 rx_buf->skb = NULL;
3713
Michael Chan745720e2006-06-29 12:37:41 -07003714 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003715 }
3716}
3717
/* Drop every driver-owned socket buffer, TX ring first, then RX. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3724
3725static int
3726bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3727{
3728 int rc;
3729
3730 rc = bnx2_reset_chip(bp, reset_code);
3731 bnx2_free_skbs(bp);
3732 if (rc)
3733 return rc;
3734
Michael Chanfba9fe92006-06-12 22:21:25 -07003735 if ((rc = bnx2_init_chip(bp)) != 0)
3736 return rc;
3737
Michael Chanb6016b72005-05-26 13:03:09 -07003738 bnx2_init_tx_ring(bp);
3739 bnx2_init_rx_ring(bp);
3740 return 0;
3741}
3742
3743static int
3744bnx2_init_nic(struct bnx2 *bp)
3745{
3746 int rc;
3747
3748 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3749 return rc;
3750
Michael Chan80be4432006-11-19 14:07:28 -08003751 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003752 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003753 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003754 bnx2_set_link(bp);
3755 return 0;
3756}
3757
3758static int
3759bnx2_test_registers(struct bnx2 *bp)
3760{
3761 int ret;
3762 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003763 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003764 u16 offset;
3765 u16 flags;
3766 u32 rw_mask;
3767 u32 ro_mask;
3768 } reg_tbl[] = {
3769 { 0x006c, 0, 0x00000000, 0x0000003f },
3770 { 0x0090, 0, 0xffffffff, 0x00000000 },
3771 { 0x0094, 0, 0x00000000, 0x00000000 },
3772
3773 { 0x0404, 0, 0x00003f00, 0x00000000 },
3774 { 0x0418, 0, 0x00000000, 0xffffffff },
3775 { 0x041c, 0, 0x00000000, 0xffffffff },
3776 { 0x0420, 0, 0x00000000, 0x80ffffff },
3777 { 0x0424, 0, 0x00000000, 0x00000000 },
3778 { 0x0428, 0, 0x00000000, 0x00000001 },
3779 { 0x0450, 0, 0x00000000, 0x0000ffff },
3780 { 0x0454, 0, 0x00000000, 0xffffffff },
3781 { 0x0458, 0, 0x00000000, 0xffffffff },
3782
3783 { 0x0808, 0, 0x00000000, 0xffffffff },
3784 { 0x0854, 0, 0x00000000, 0xffffffff },
3785 { 0x0868, 0, 0x00000000, 0x77777777 },
3786 { 0x086c, 0, 0x00000000, 0x77777777 },
3787 { 0x0870, 0, 0x00000000, 0x77777777 },
3788 { 0x0874, 0, 0x00000000, 0x77777777 },
3789
3790 { 0x0c00, 0, 0x00000000, 0x00000001 },
3791 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3792 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003793
3794 { 0x1000, 0, 0x00000000, 0x00000001 },
3795 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07003796
3797 { 0x1408, 0, 0x01c00800, 0x00000000 },
3798 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3799 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003800 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003801 { 0x14b0, 0, 0x00000002, 0x00000001 },
3802 { 0x14b8, 0, 0x00000000, 0x00000000 },
3803 { 0x14c0, 0, 0x00000000, 0x00000009 },
3804 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3805 { 0x14cc, 0, 0x00000000, 0x00000001 },
3806 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003807
3808 { 0x1800, 0, 0x00000000, 0x00000001 },
3809 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07003810
3811 { 0x2800, 0, 0x00000000, 0x00000001 },
3812 { 0x2804, 0, 0x00000000, 0x00003f01 },
3813 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3814 { 0x2810, 0, 0xffff0000, 0x00000000 },
3815 { 0x2814, 0, 0xffff0000, 0x00000000 },
3816 { 0x2818, 0, 0xffff0000, 0x00000000 },
3817 { 0x281c, 0, 0xffff0000, 0x00000000 },
3818 { 0x2834, 0, 0xffffffff, 0x00000000 },
3819 { 0x2840, 0, 0x00000000, 0xffffffff },
3820 { 0x2844, 0, 0x00000000, 0xffffffff },
3821 { 0x2848, 0, 0xffffffff, 0x00000000 },
3822 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3823
3824 { 0x2c00, 0, 0x00000000, 0x00000011 },
3825 { 0x2c04, 0, 0x00000000, 0x00030007 },
3826
Michael Chanb6016b72005-05-26 13:03:09 -07003827 { 0x3c00, 0, 0x00000000, 0x00000001 },
3828 { 0x3c04, 0, 0x00000000, 0x00070000 },
3829 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3830 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3831 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3832 { 0x3c14, 0, 0x00000000, 0xffffffff },
3833 { 0x3c18, 0, 0x00000000, 0xffffffff },
3834 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3835 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003836
3837 { 0x5004, 0, 0x00000000, 0x0000007f },
3838 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3839 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3840
Michael Chanb6016b72005-05-26 13:03:09 -07003841 { 0x5c00, 0, 0x00000000, 0x00000001 },
3842 { 0x5c04, 0, 0x00000000, 0x0003000f },
3843 { 0x5c08, 0, 0x00000003, 0x00000000 },
3844 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3845 { 0x5c10, 0, 0x00000000, 0xffffffff },
3846 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3847 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3848 { 0x5c88, 0, 0x00000000, 0x00077373 },
3849 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3850
3851 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3852 { 0x680c, 0, 0xffffffff, 0x00000000 },
3853 { 0x6810, 0, 0xffffffff, 0x00000000 },
3854 { 0x6814, 0, 0xffffffff, 0x00000000 },
3855 { 0x6818, 0, 0xffffffff, 0x00000000 },
3856 { 0x681c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3858 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3859 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3860 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3861 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3862 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3863 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3864 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3865 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3866 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3867 { 0x684c, 0, 0xffffffff, 0x00000000 },
3868 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3869 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3870 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3871 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3872 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3873 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3874
3875 { 0xffff, 0, 0x00000000, 0x00000000 },
3876 };
3877
3878 ret = 0;
3879 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3880 u32 offset, rw_mask, ro_mask, save_val, val;
3881
3882 offset = (u32) reg_tbl[i].offset;
3883 rw_mask = reg_tbl[i].rw_mask;
3884 ro_mask = reg_tbl[i].ro_mask;
3885
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003886 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003887
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003888 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003889
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003890 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003891 if ((val & rw_mask) != 0) {
3892 goto reg_test_err;
3893 }
3894
3895 if ((val & ro_mask) != (save_val & ro_mask)) {
3896 goto reg_test_err;
3897 }
3898
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003899 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003900
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003901 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003902 if ((val & rw_mask) != rw_mask) {
3903 goto reg_test_err;
3904 }
3905
3906 if ((val & ro_mask) != (save_val & ro_mask)) {
3907 goto reg_test_err;
3908 }
3909
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003910 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003911 continue;
3912
3913reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003914 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003915 ret = -ENODEV;
3916 break;
3917 }
3918 return ret;
3919}
3920
3921static int
3922bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3923{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003924 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003925 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3926 int i;
3927
3928 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3929 u32 offset;
3930
3931 for (offset = 0; offset < size; offset += 4) {
3932
3933 REG_WR_IND(bp, start + offset, test_pattern[i]);
3934
3935 if (REG_RD_IND(bp, start + offset) !=
3936 test_pattern[i]) {
3937 return -ENODEV;
3938 }
3939 }
3940 }
3941 return 0;
3942}
3943
3944static int
3945bnx2_test_memory(struct bnx2 *bp)
3946{
3947 int ret = 0;
3948 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003949 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003950 u32 offset;
3951 u32 len;
3952 } mem_tbl[] = {
3953 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003954 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003955 { 0xe0000, 0x4000 },
3956 { 0x120000, 0x4000 },
3957 { 0x1a0000, 0x4000 },
3958 { 0x160000, 0x4000 },
3959 { 0xffffffff, 0 },
3960 };
3961
3962 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3963 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3964 mem_tbl[i].len)) != 0) {
3965 return ret;
3966 }
3967 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003968
Michael Chanb6016b72005-05-26 13:03:09 -07003969 return ret;
3970}
3971
/* Loopback mode selectors for bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK 0
#define BNX2_PHY_LOOPBACK 1
3974
/* Self-test helper: transmit one 1514-byte frame addressed to ourselves
 * with the chip placed in MAC or PHY loopback, then verify the frame is
 * received back intact on ring 0.
 *
 * Returns 0 if the frame loops back unmodified, -EINVAL for an unknown
 * loopback_mode, -ENOMEM if the skb cannot be allocated, and -ENODEV
 * for any TX/RX completion or payload mismatch.  Caller (the ethtool
 * self-test path) is expected to have quiesced normal traffic.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size (non-VLAN) test frame: dest MAC = our own
	 * address, zeroed src/type area, then a counting byte pattern
	 * that is verified on receive.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing pass (without raising an interrupt) so the
	 * status block consumer indices read below are current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Fill a single TX descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell, give the chip time to loop the frame,
	 * then force another coalescing pass to publish the results.
	 */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must be fully consumed ... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ... and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	/* NOTE(review): indexes the RX buffer ring with the raw consumer
	 * index; assumes it is a valid ring index here — confirm against
	 * the ring sizing used by the self-test setup.
	 */
	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any RX error flagged in the L2 frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* l2_fhdr_pkt_len includes the 4-byte CRC appended on the wire. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4093
/* Failure bit flags returned by bnx2_test_loopback(). */
#define BNX2_MAC_LOOPBACK_FAILED 1
#define BNX2_PHY_LOOPBACK_FAILED 2
#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
			 BNX2_PHY_LOOPBACK_FAILED)
4098
4099static int
4100bnx2_test_loopback(struct bnx2 *bp)
4101{
4102 int rc = 0;
4103
4104 if (!netif_running(bp->dev))
4105 return BNX2_LOOPBACK_FAILED;
4106
4107 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4108 spin_lock_bh(&bp->phy_lock);
4109 bnx2_init_phy(bp);
4110 spin_unlock_bh(&bp->phy_lock);
4111 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4112 rc |= BNX2_MAC_LOOPBACK_FAILED;
4113 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4114 rc |= BNX2_PHY_LOOPBACK_FAILED;
4115 return rc;
4116}
4117
/* Size of each checksummed NVRAM block tested by bnx2_test_nvram().
 * 0xdebb20e3 is the standard CRC-32 residue: the value left by running
 * the CRC over a region that embeds its own checksum.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
4120
4121static int
4122bnx2_test_nvram(struct bnx2 *bp)
4123{
4124 u32 buf[NVRAM_SIZE / 4];
4125 u8 *data = (u8 *) buf;
4126 int rc = 0;
4127 u32 magic, csum;
4128
4129 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4130 goto test_nvram_done;
4131
4132 magic = be32_to_cpu(buf[0]);
4133 if (magic != 0x669955aa) {
4134 rc = -ENODEV;
4135 goto test_nvram_done;
4136 }
4137
4138 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4139 goto test_nvram_done;
4140
4141 csum = ether_crc_le(0x100, data);
4142 if (csum != CRC32_RESIDUAL) {
4143 rc = -ENODEV;
4144 goto test_nvram_done;
4145 }
4146
4147 csum = ether_crc_le(0x100, data + 0x100);
4148 if (csum != CRC32_RESIDUAL) {
4149 rc = -ENODEV;
4150 }
4151
4152test_nvram_done:
4153 return rc;
4154}
4155
4156static int
4157bnx2_test_link(struct bnx2 *bp)
4158{
4159 u32 bmsr;
4160
Michael Chanc770a652005-08-25 15:38:39 -07004161 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004162 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004164 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004165
Michael Chanb6016b72005-05-26 13:03:09 -07004166 if (bmsr & BMSR_LSTATUS) {
4167 return 0;
4168 }
4169 return -ENODEV;
4170}
4171
/* Verify that the chip can deliver a status-block update/interrupt.
 * Forces an immediate coalescing pass and polls (10 x ~10ms) for the
 * status index in BNX2_PCICFG_INT_ACK_CMD to advance.  Used by
 * bnx2_open() to confirm MSI delivery actually works.
 * Returns 0 on success, -ENODEV if the index never changes or the
 * device is not running.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */

	for (i = 0; i < 10; i++) {
		/* An advancing status index means the event fired. */
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4201
/* Periodic SerDes link maintenance for the 5706, called from
 * bnx2_timer().  While autoneg is enabled but link is down: if the PHY
 * reports signal detect without receiving CONFIG pages from the peer,
 * fall back to forced 1Gb full duplex ("parallel detect").  Once link
 * is up in parallel-detect mode and the peer starts sending CONFIG
 * pages, re-enable autoneg.  serdes_an_pending delays this logic for a
 * number of timer ticks after a renegotiation is started.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): 0x1c/0x17/0x15 are vendor shadow/
			 * expansion registers; bit meanings per the inline
			 * comments below — confirm against Broadcom PHY docs.
			 * 0x15 is read twice so the second read is current.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Peer is not autonegotiating: force
				 * 1Gb full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the peer now sends
		 * CONFIG pages, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4256
/* Periodic SerDes link maintenance for the 5708, called from
 * bnx2_timer().  While autoneg is enabled but link is down, alternate
 * between autonegotiation and forced 2.5Gb full duplex so either kind
 * of link partner can eventually be matched.  No-op (and clears any
 * pending count) unless the PHY is 2.5G capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg didn't link: try forced 2.5Gb full
			 * duplex for SERDES_FORCED_TIMEOUT.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't link either: re-enable
			 * autoneg and skip the next two timer ticks.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4291
/* Driver heartbeat timer.  Writes the periodic driver pulse sequence to
 * the bootcode mailbox, snapshots the firmware RX drop counter into the
 * stats block, and runs the chip-specific SerDes state machine.
 * Re-arms itself every bp->current_interval jiffies while the device is
 * running; skips the body (but still re-arms) while interrupts are
 * gated via intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to the bootcode firmware. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4319
/* Called with rtnl_lock.
 *
 * Device open: power the chip to D0, allocate rings/status blocks,
 * request the IRQ (MSI is attempted except on 5706 A0/A1 or when the
 * disable_msi module parameter is set; otherwise shared INTx), bring
 * the NIC up, and start the heartbeat timer.  When MSI is in use, a
 * test interrupt validates delivery; on failure the driver tears down
 * and falls back to INTx.  Returns 0 or a negative errno, with all
 * partially-acquired resources released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is skipped on 5706 A0/A1 and when disable_msi is set. */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind IRQ, skbs and ring memory on init failure. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Reinitialize the chip and retry with INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4415
/* Work-queue handler scheduled by bnx2_tx_timeout(): stop traffic,
 * reinitialize the NIC, and restart.  in_reset_task is the flag
 * bnx2_close() polls so the reset cannot race with device teardown.
 * intr_sem is set to 1 before restarting; bnx2_netif_start() is
 * expected to pair with this (re-enabling interrupt processing).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4433
/* TX watchdog callback.  Defers the chip reset to process context
 * (bnx2_reset_task) rather than resetting here in softirq context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4442
4443#ifdef BCM_VLAN
/* Called with rtnl_lock.
 * Install the VLAN group and reprogram the chip's RX filtering, with
 * traffic stopped for the duration of the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4457
/* Called with rtnl_lock.
 * Remove one VLAN id from the group and reprogram RX filtering, with
 * traffic stopped for the duration of the update.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4472#endif
4473
Herbert Xu932ff272006-06-09 12:20:56 -07004474/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004475 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4476 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004477 */
4478static int
4479bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4480{
Michael Chan972ec0d2006-01-23 16:12:43 -08004481 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004482 dma_addr_t mapping;
4483 struct tx_bd *txbd;
4484 struct sw_bd *tx_buf;
4485 u32 len, vlan_tag_flags, last_frag, mss;
4486 u16 prod, ring_prod;
4487 int i;
4488
Michael Chane89bbf12005-08-25 15:36:58 -07004489 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004490 netif_stop_queue(dev);
4491 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4492 dev->name);
4493
4494 return NETDEV_TX_BUSY;
4495 }
4496 len = skb_headlen(skb);
4497 prod = bp->tx_prod;
4498 ring_prod = TX_RING_IDX(prod);
4499
4500 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004501 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004502 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4503 }
4504
4505 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4506 vlan_tag_flags |=
4507 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4508 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004509#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004510 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004511 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4512 u32 tcp_opt_len, ip_tcp_len;
4513
4514 if (skb_header_cloned(skb) &&
4515 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4516 dev_kfree_skb(skb);
4517 return NETDEV_TX_OK;
4518 }
4519
4520 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4521 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4522
4523 tcp_opt_len = 0;
4524 if (skb->h.th->doff > 5) {
4525 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4526 }
4527 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4528
4529 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004530 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004531 skb->h.th->check =
4532 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4533 skb->nh.iph->daddr,
4534 0, IPPROTO_TCP, 0);
4535
4536 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4537 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4538 (tcp_opt_len >> 2)) << 8;
4539 }
4540 }
4541 else
4542#endif
4543 {
4544 mss = 0;
4545 }
4546
4547 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004548
Michael Chanb6016b72005-05-26 13:03:09 -07004549 tx_buf = &bp->tx_buf_ring[ring_prod];
4550 tx_buf->skb = skb;
4551 pci_unmap_addr_set(tx_buf, mapping, mapping);
4552
4553 txbd = &bp->tx_desc_ring[ring_prod];
4554
4555 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4556 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4557 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4558 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4559
4560 last_frag = skb_shinfo(skb)->nr_frags;
4561
4562 for (i = 0; i < last_frag; i++) {
4563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4564
4565 prod = NEXT_TX_BD(prod);
4566 ring_prod = TX_RING_IDX(prod);
4567 txbd = &bp->tx_desc_ring[ring_prod];
4568
4569 len = frag->size;
4570 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4571 len, PCI_DMA_TODEVICE);
4572 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4573 mapping, mapping);
4574
4575 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4576 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4577 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4578 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4579
4580 }
4581 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4582
4583 prod = NEXT_TX_BD(prod);
4584 bp->tx_prod_bseq += skb->len;
4585
Michael Chan234754d2006-11-19 14:11:41 -08004586 REG_WR16(bp, bp->tx_bidx_addr, prod);
4587 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004588
4589 mmiowb();
4590
4591 bp->tx_prod = prod;
4592 dev->trans_start = jiffies;
4593
Michael Chane89bbf12005-08-25 15:36:58 -07004594 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004595 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004596 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004597 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004598 }
4599
4600 return NETDEV_TX_OK;
4601}
4602
/* Called with rtnl_lock.
 *
 * Device close: wait out any in-flight reset_task, stop traffic and the
 * heartbeat timer, tell the bootcode how we are going down (link-down,
 * WoL suspend, or no-WoL suspend), release the IRQ and all ring memory,
 * and drop the chip to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Select the shutdown message for the bootcode per WoL config. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4638
/* Assemble an unsigned long from a hardware counter's _hi/_lo register
 * pair: the full 64 bits on 64-bit hosts, only the low 32 bits on
 * 32-bit hosts (where unsigned long cannot hold more).
 */
#define GET_NET_STATS64(ctr) \
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr) \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4651
/* Assemble struct net_device_stats from the chip's DMA'd statistics
 * block.  If the stats block has not been allocated yet, the (empty)
 * structure is returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are reported as 0 on 5706 and 5708 A0.
	 * NOTE(review): presumably a counter erratum on those chips —
	 * confirm against the chip errata documents.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Includes frames the firmware dropped (sampled in bnx2_timer). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4727
4728/* All ethtool functions called with rtnl_lock */
4729
4730static int
4731bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4732{
Michael Chan972ec0d2006-01-23 16:12:43 -08004733 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004734
4735 cmd->supported = SUPPORTED_Autoneg;
4736 if (bp->phy_flags & PHY_SERDES_FLAG) {
4737 cmd->supported |= SUPPORTED_1000baseT_Full |
4738 SUPPORTED_FIBRE;
4739
4740 cmd->port = PORT_FIBRE;
4741 }
4742 else {
4743 cmd->supported |= SUPPORTED_10baseT_Half |
4744 SUPPORTED_10baseT_Full |
4745 SUPPORTED_100baseT_Half |
4746 SUPPORTED_100baseT_Full |
4747 SUPPORTED_1000baseT_Full |
4748 SUPPORTED_TP;
4749
4750 cmd->port = PORT_TP;
4751 }
4752
4753 cmd->advertising = bp->advertising;
4754
4755 if (bp->autoneg & AUTONEG_SPEED) {
4756 cmd->autoneg = AUTONEG_ENABLE;
4757 }
4758 else {
4759 cmd->autoneg = AUTONEG_DISABLE;
4760 }
4761
4762 if (netif_carrier_ok(dev)) {
4763 cmd->speed = bp->line_speed;
4764 cmd->duplex = bp->duplex;
4765 }
4766 else {
4767 cmd->speed = -1;
4768 cmd->duplex = -1;
4769 }
4770
4771 cmd->transceiver = XCVR_INTERNAL;
4772 cmd->phy_address = bp->phy_addr;
4773
4774 return 0;
4775}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004776
/* ethtool set_settings handler (called with rtnl_lock).  Validates the
 * requested autoneg/speed/duplex combination against the PHY flavor,
 * then commits the new settings and renegotiates under phy_lock.
 * Returns -EINVAL for combinations the hardware cannot do: advertising
 * a single copper speed on SerDes, 1000baseT half duplex, forced speeds
 * other than 1000/2500 full on SerDes (2500 only if 2.5G capable), or
 * forced 1000 on copper.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated after validation. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 single-speed advertising: copper only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY
			 * flavor supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes forced mode: 1000 or 2500 full only,
			 * 2500 only on 2.5G-capable PHYs.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4852
4853static void
4854bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4855{
Michael Chan972ec0d2006-01-23 16:12:43 -08004856 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004857
4858 strcpy(info->driver, DRV_MODULE_NAME);
4859 strcpy(info->version, DRV_MODULE_VERSION);
4860 strcpy(info->bus_info, pci_name(bp->pdev));
4861 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4862 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4863 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004864 info->fw_version[1] = info->fw_version[3] = '.';
4865 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004866}
4867
/* Size of the register dump produced by bnx2_get_regs() (ETHTOOL_GREGS). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4875
4876static void
4877bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4878{
4879 u32 *p = _p, i, offset;
4880 u8 *orig_p = _p;
4881 struct bnx2 *bp = netdev_priv(dev);
4882 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4883 0x0800, 0x0880, 0x0c00, 0x0c10,
4884 0x0c30, 0x0d08, 0x1000, 0x101c,
4885 0x1040, 0x1048, 0x1080, 0x10a4,
4886 0x1400, 0x1490, 0x1498, 0x14f0,
4887 0x1500, 0x155c, 0x1580, 0x15dc,
4888 0x1600, 0x1658, 0x1680, 0x16d8,
4889 0x1800, 0x1820, 0x1840, 0x1854,
4890 0x1880, 0x1894, 0x1900, 0x1984,
4891 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4892 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4893 0x2000, 0x2030, 0x23c0, 0x2400,
4894 0x2800, 0x2820, 0x2830, 0x2850,
4895 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4896 0x3c00, 0x3c94, 0x4000, 0x4010,
4897 0x4080, 0x4090, 0x43c0, 0x4458,
4898 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4899 0x4fc0, 0x5010, 0x53c0, 0x5444,
4900 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4901 0x5fc0, 0x6000, 0x6400, 0x6428,
4902 0x6800, 0x6848, 0x684c, 0x6860,
4903 0x6888, 0x6910, 0x8000 };
4904
4905 regs->version = 0;
4906
4907 memset(p, 0, BNX2_REGDUMP_LEN);
4908
4909 if (!netif_running(bp->dev))
4910 return;
4911
4912 i = 0;
4913 offset = reg_boundaries[0];
4914 p += offset;
4915 while (offset < BNX2_REGDUMP_LEN) {
4916 *p++ = REG_RD(bp, offset);
4917 offset += 4;
4918 if (offset == reg_boundaries[i + 1]) {
4919 offset = reg_boundaries[i + 2];
4920 p = (u32 *) (orig_p + offset);
4921 i += 2;
4922 }
4923 }
4924}
4925
Michael Chanb6016b72005-05-26 13:03:09 -07004926static void
4927bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4928{
Michael Chan972ec0d2006-01-23 16:12:43 -08004929 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004930
4931 if (bp->flags & NO_WOL_FLAG) {
4932 wol->supported = 0;
4933 wol->wolopts = 0;
4934 }
4935 else {
4936 wol->supported = WAKE_MAGIC;
4937 if (bp->wol)
4938 wol->wolopts = WAKE_MAGIC;
4939 else
4940 wol->wolopts = 0;
4941 }
4942 memset(&wol->sopass, 0, sizeof(wol->sopass));
4943}
4944
4945static int
4946bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4947{
Michael Chan972ec0d2006-01-23 16:12:43 -08004948 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004949
4950 if (wol->wolopts & ~WAKE_MAGIC)
4951 return -EINVAL;
4952
4953 if (wol->wolopts & WAKE_MAGIC) {
4954 if (bp->flags & NO_WOL_FLAG)
4955 return -EINVAL;
4956
4957 bp->wol = 1;
4958 }
4959 else {
4960 bp->wol = 0;
4961 }
4962 return 0;
4963}
4964
/* ethtool nway_reset handler: restart autonegotiation.
 * Fails unless autonegotiation is currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; loopback keeps the
		 * link down in the meantime.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handled by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4999
5000static int
5001bnx2_get_eeprom_len(struct net_device *dev)
5002{
Michael Chan972ec0d2006-01-23 16:12:43 -08005003 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005004
Michael Chan1122db72006-01-23 16:11:42 -08005005 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005006 return 0;
5007
Michael Chan1122db72006-01-23 16:11:42 -08005008 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005009}
5010
5011static int
5012bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5013 u8 *eebuf)
5014{
Michael Chan972ec0d2006-01-23 16:12:43 -08005015 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005016 int rc;
5017
John W. Linville1064e942005-11-10 12:58:24 -08005018 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005019
5020 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5021
5022 return rc;
5023}
5024
5025static int
5026bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5027 u8 *eebuf)
5028{
Michael Chan972ec0d2006-01-23 16:12:43 -08005029 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005030 int rc;
5031
John W. Linville1064e942005-11-10 12:58:24 -08005032 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005033
5034 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5035
5036 return rc;
5037}
5038
5039static int
5040bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5041{
Michael Chan972ec0d2006-01-23 16:12:43 -08005042 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005043
5044 memset(coal, 0, sizeof(struct ethtool_coalesce));
5045
5046 coal->rx_coalesce_usecs = bp->rx_ticks;
5047 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5048 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5049 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5050
5051 coal->tx_coalesce_usecs = bp->tx_ticks;
5052 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5053 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5054 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5055
5056 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5057
5058 return 0;
5059}
5060
5061static int
5062bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5063{
Michael Chan972ec0d2006-01-23 16:12:43 -08005064 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005065
5066 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5067 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5068
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005069 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005070 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5071
5072 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5073 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5074
5075 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5076 if (bp->rx_quick_cons_trip_int > 0xff)
5077 bp->rx_quick_cons_trip_int = 0xff;
5078
5079 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5080 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5081
5082 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5083 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5084
5085 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5086 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5087
5088 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5089 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5090 0xff;
5091
5092 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5093 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5094 bp->stats_ticks &= 0xffff00;
5095
5096 if (netif_running(bp->dev)) {
5097 bnx2_netif_stop(bp);
5098 bnx2_init_nic(bp);
5099 bnx2_netif_start(bp);
5100 }
5101
5102 return 0;
5103}
5104
5105static void
5106bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5107{
Michael Chan972ec0d2006-01-23 16:12:43 -08005108 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005109
Michael Chan13daffa2006-03-20 17:49:20 -08005110 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005111 ering->rx_mini_max_pending = 0;
5112 ering->rx_jumbo_max_pending = 0;
5113
5114 ering->rx_pending = bp->rx_ring_size;
5115 ering->rx_mini_pending = 0;
5116 ering->rx_jumbo_pending = 0;
5117
5118 ering->tx_max_pending = MAX_TX_DESC_CNT;
5119 ering->tx_pending = bp->tx_ring_size;
5120}
5121
/* ethtool set_ringparam handler: resize the RX/TX rings.
 * When the interface is up, the chip is reset and all ring memory is
 * freed and reallocated at the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* tx_pending must exceed MAX_SKB_FRAGS so a maximally fragmented
	 * skb always fits in the TX ring.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* Quiesce and tear everything down before resizing. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no ring memory — TODO confirm the error path
		 * is acceptable or should close the device cleanly.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5155
5156static void
5157bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5158{
Michael Chan972ec0d2006-01-23 16:12:43 -08005159 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005160
5161 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5162 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5163 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5164}
5165
5166static int
5167bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5168{
Michael Chan972ec0d2006-01-23 16:12:43 -08005169 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005170
5171 bp->req_flow_ctrl = 0;
5172 if (epause->rx_pause)
5173 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5174 if (epause->tx_pause)
5175 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5176
5177 if (epause->autoneg) {
5178 bp->autoneg |= AUTONEG_FLOW_CTRL;
5179 }
5180 else {
5181 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5182 }
5183
Michael Chanc770a652005-08-25 15:38:39 -07005184 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005185
5186 bnx2_setup_phy(bp);
5187
Michael Chanc770a652005-08-25 15:38:39 -07005188 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005189
5190 return 0;
5191}
5192
/* ethtool get_rx_csum handler: nonzero when RX checksum offload is on. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5200
/* ethtool set_rx_csum handler: record the RX checksum offload setting.
 * The flag is consulted by the receive path; no chip reprogramming here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5209
/* ethtool set_tso handler: toggle TSO (and TSO with ECN) feature bits. */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
5219
/* Number of entries in each of the statistics tables below. */
#define BNX2_NUM_STATS 46

/* ethtool statistics names; index order must match
 * bnx2_stats_offset_arr and the stats_len arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5272
/* 32-bit word offset of a counter within the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset of each exported counter in the stats block; same index order
 * as bnx2_stats_str_arr.  64-bit counters point at their _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5323
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skipped) for 5706 chips;
 * same index order as bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5334
/* Per-counter width in bytes for 5708 chips; unlike the 5706 table,
 * stat_Dot3StatsCarrierSenseErrors (index 11) is readable here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5342
/* Number of ethtool self-tests; names listed below, results reported
 * in the same order by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5355
/* ethtool self_test_count handler: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5361
/* ethtool self_test handler.  Offline tests (register, memory, loopback)
 * require resetting the chip; online tests (NVRAM, interrupt, link) run
 * against the live device.  buf[i] is nonzero when test i failed, in the
 * order of bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the device offline and put the chip in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation before the online tests. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5417
5418static void
5419bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5420{
5421 switch (stringset) {
5422 case ETH_SS_STATS:
5423 memcpy(buf, bnx2_stats_str_arr,
5424 sizeof(bnx2_stats_str_arr));
5425 break;
5426 case ETH_SS_TEST:
5427 memcpy(buf, bnx2_tests_str_arr,
5428 sizeof(bnx2_tests_str_arr));
5429 break;
5430 }
5431}
5432
/* ethtool get_stats_count handler: number of exported statistics. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5438
/* ethtool get_ethtool_stats handler: copy counters out of the DMA'd
 * hardware statistics block.  The width table (4- or 8-byte counters,
 * 0 = skipped due to errata) depends on the chip revision.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet (device never brought up). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: _hi word first, then _lo */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5479
5480static int
5481bnx2_phys_id(struct net_device *dev, u32 data)
5482{
Michael Chan972ec0d2006-01-23 16:12:43 -08005483 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005484 int i;
5485 u32 save;
5486
5487 if (data == 0)
5488 data = 2;
5489
5490 save = REG_RD(bp, BNX2_MISC_CFG);
5491 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5492
5493 for (i = 0; i < (data * 2); i++) {
5494 if ((i % 2) == 0) {
5495 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5496 }
5497 else {
5498 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5499 BNX2_EMAC_LED_1000MB_OVERRIDE |
5500 BNX2_EMAC_LED_100MB_OVERRIDE |
5501 BNX2_EMAC_LED_10MB_OVERRIDE |
5502 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5503 BNX2_EMAC_LED_TRAFFIC);
5504 }
5505 msleep_interruptible(500);
5506 if (signal_pending(current))
5507 break;
5508 }
5509 REG_WR(bp, BNX2_EMAC_LED, 0);
5510 REG_WR(bp, BNX2_MISC_CFG, save);
5511 return 0;
5512}
5513
/* ethtool operations table; all handlers are called with rtnl_lock held. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5551
/* Called with rtnl_lock */
/* MII ioctl handler: report the PHY address and read/write PHY
 * registers.  Writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* phy_lock serializes MDIO access with the timer/IRQ paths */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5593
5594/* Called with rtnl_lock */
5595static int
5596bnx2_change_mac_addr(struct net_device *dev, void *p)
5597{
5598 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005599 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005600
Michael Chan73eef4c2005-08-25 15:39:15 -07005601 if (!is_valid_ether_addr(addr->sa_data))
5602 return -EINVAL;
5603
Michael Chanb6016b72005-05-26 13:03:09 -07005604 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5605 if (netif_running(dev))
5606 bnx2_set_mac_addr(bp);
5607
5608 return 0;
5609}
5610
5611/* Called with rtnl_lock */
5612static int
5613bnx2_change_mtu(struct net_device *dev, int new_mtu)
5614{
Michael Chan972ec0d2006-01-23 16:12:43 -08005615 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005616
5617 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5618 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5619 return -EINVAL;
5620
5621 dev->mtu = new_mtu;
5622 if (netif_running(dev)) {
5623 bnx2_netif_stop(bp);
5624
5625 bnx2_init_nic(bp);
5626
5627 bnx2_netif_start(bp);
5628 }
5629 return 0;
5630}
5631
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: run the interrupt handler with the device IRQ
 * masked, so netconsole etc. can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5643
Michael Chan253c8b72007-01-08 19:56:01 -08005644static void __devinit
5645bnx2_get_5709_media(struct bnx2 *bp)
5646{
5647 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5648 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5649 u32 strap;
5650
5651 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5652 return;
5653 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5654 bp->phy_flags |= PHY_SERDES_FLAG;
5655 return;
5656 }
5657
5658 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5659 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5660 else
5661 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5662
5663 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5664 switch (strap) {
5665 case 0x4:
5666 case 0x5:
5667 case 0x6:
5668 bp->phy_flags |= PHY_SERDES_FLAG;
5669 return;
5670 }
5671 } else {
5672 switch (strap) {
5673 case 0x1:
5674 case 0x2:
5675 case 0x4:
5676 bp->phy_flags |= PHY_SERDES_FLAG;
5677 return;
5678 }
5679 }
5680}
5681
Michael Chanb6016b72005-05-26 13:03:09 -07005682static int __devinit
5683bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5684{
5685 struct bnx2 *bp;
5686 unsigned long mem_len;
5687 int rc;
5688 u32 reg;
5689
5690 SET_MODULE_OWNER(dev);
5691 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005692 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005693
5694 bp->flags = 0;
5695 bp->phy_flags = 0;
5696
5697 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5698 rc = pci_enable_device(pdev);
5699 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005700 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005701 goto err_out;
5702 }
5703
5704 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005705 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005706 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005707 rc = -ENODEV;
5708 goto err_out_disable;
5709 }
5710
5711 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5712 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005713 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005714 goto err_out_disable;
5715 }
5716
5717 pci_set_master(pdev);
5718
5719 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5720 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005721 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005722 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005723 rc = -EIO;
5724 goto err_out_release;
5725 }
5726
Michael Chanb6016b72005-05-26 13:03:09 -07005727 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5728 bp->flags |= USING_DAC_FLAG;
5729 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005730 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005731 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005732 rc = -EIO;
5733 goto err_out_release;
5734 }
5735 }
5736 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005737 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005738 rc = -EIO;
5739 goto err_out_release;
5740 }
5741
5742 bp->dev = dev;
5743 bp->pdev = pdev;
5744
5745 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005746 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005747
5748 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005749 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005750 dev->mem_end = dev->mem_start + mem_len;
5751 dev->irq = pdev->irq;
5752
5753 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5754
5755 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005756 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005757 rc = -ENOMEM;
5758 goto err_out_release;
5759 }
5760
5761 /* Configure byte swap and enable write to the reg_window registers.
5762 * Rely on CPU to do target byte swapping on big endian systems
5763 * The chip's target access swapping will not swap all accesses
5764 */
5765 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5766 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5767 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5768
Pavel Machek829ca9a2005-09-03 15:56:56 -07005769 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005770
5771 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5772
Michael Chan59b47d82006-11-19 14:10:45 -08005773 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5774 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5775 if (bp->pcix_cap == 0) {
5776 dev_err(&pdev->dev,
5777 "Cannot find PCIX capability, aborting.\n");
5778 rc = -EIO;
5779 goto err_out_unmap;
5780 }
5781 }
5782
Michael Chanb6016b72005-05-26 13:03:09 -07005783 /* Get bus information. */
5784 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5785 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5786 u32 clkreg;
5787
5788 bp->flags |= PCIX_FLAG;
5789
5790 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005791
Michael Chanb6016b72005-05-26 13:03:09 -07005792 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5793 switch (clkreg) {
5794 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5795 bp->bus_speed_mhz = 133;
5796 break;
5797
5798 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5799 bp->bus_speed_mhz = 100;
5800 break;
5801
5802 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5803 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5804 bp->bus_speed_mhz = 66;
5805 break;
5806
5807 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5808 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5809 bp->bus_speed_mhz = 50;
5810 break;
5811
5812 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5813 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5814 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5815 bp->bus_speed_mhz = 33;
5816 break;
5817 }
5818 }
5819 else {
5820 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5821 bp->bus_speed_mhz = 66;
5822 else
5823 bp->bus_speed_mhz = 33;
5824 }
5825
5826 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5827 bp->flags |= PCI_32BIT_FLAG;
5828
5829 /* 5706A0 may falsely detect SERR and PERR. */
5830 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5831 reg = REG_RD(bp, PCI_COMMAND);
5832 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5833 REG_WR(bp, PCI_COMMAND, reg);
5834 }
5835 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5836 !(bp->flags & PCIX_FLAG)) {
5837
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005838 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005839 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005840 goto err_out_unmap;
5841 }
5842
5843 bnx2_init_nvram(bp);
5844
Michael Chane3648b32005-11-04 08:51:21 -08005845 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5846
5847 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5848 BNX2_SHM_HDR_SIGNATURE_SIG)
5849 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5850 else
5851 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5852
Michael Chanb6016b72005-05-26 13:03:09 -07005853 /* Get the permanent MAC address. First we need to make sure the
5854 * firmware is actually running.
5855 */
Michael Chane3648b32005-11-04 08:51:21 -08005856 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005857
5858 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5859 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005860 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005861 rc = -ENODEV;
5862 goto err_out_unmap;
5863 }
5864
Michael Chane3648b32005-11-04 08:51:21 -08005865 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005866
Michael Chane3648b32005-11-04 08:51:21 -08005867 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005868 bp->mac_addr[0] = (u8) (reg >> 8);
5869 bp->mac_addr[1] = (u8) reg;
5870
Michael Chane3648b32005-11-04 08:51:21 -08005871 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005872 bp->mac_addr[2] = (u8) (reg >> 24);
5873 bp->mac_addr[3] = (u8) (reg >> 16);
5874 bp->mac_addr[4] = (u8) (reg >> 8);
5875 bp->mac_addr[5] = (u8) reg;
5876
5877 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005878 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005879
5880 bp->rx_csum = 1;
5881
5882 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5883
5884 bp->tx_quick_cons_trip_int = 20;
5885 bp->tx_quick_cons_trip = 20;
5886 bp->tx_ticks_int = 80;
5887 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005888
Michael Chanb6016b72005-05-26 13:03:09 -07005889 bp->rx_quick_cons_trip_int = 6;
5890 bp->rx_quick_cons_trip = 6;
5891 bp->rx_ticks_int = 18;
5892 bp->rx_ticks = 18;
5893
5894 bp->stats_ticks = 1000000 & 0xffff00;
5895
5896 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005897 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005898
Michael Chan5b0c76a2005-11-04 08:45:49 -08005899 bp->phy_addr = 1;
5900
Michael Chanb6016b72005-05-26 13:03:09 -07005901 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08005902 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5903 bnx2_get_5709_media(bp);
5904 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005905 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005906
5907 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005908 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005909 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005910 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005911 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005912 BNX2_SHARED_HW_CFG_CONFIG);
5913 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5914 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5915 }
Michael Chan261dd5c2007-01-08 19:55:46 -08005916 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5917 CHIP_NUM(bp) == CHIP_NUM_5708)
5918 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005919
Michael Chan16088272006-06-12 22:16:43 -07005920 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5921 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5922 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005923 bp->flags |= NO_WOL_FLAG;
5924
Michael Chanb6016b72005-05-26 13:03:09 -07005925 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5926 bp->tx_quick_cons_trip_int =
5927 bp->tx_quick_cons_trip;
5928 bp->tx_ticks_int = bp->tx_ticks;
5929 bp->rx_quick_cons_trip_int =
5930 bp->rx_quick_cons_trip;
5931 bp->rx_ticks_int = bp->rx_ticks;
5932 bp->comp_prod_trip_int = bp->comp_prod_trip;
5933 bp->com_ticks_int = bp->com_ticks;
5934 bp->cmd_ticks_int = bp->cmd_ticks;
5935 }
5936
Michael Chanf9317a42006-09-29 17:06:23 -07005937 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5938 *
5939 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5940 * with byte enables disabled on the unused 32-bit word. This is legal
5941 * but causes problems on the AMD 8132 which will eventually stop
5942 * responding after a while.
5943 *
5944 * AMD believes this incompatibility is unique to the 5706, and
5945 * prefers to locally disable MSI rather than globally disabling it
5946 * using pci_msi_quirk.
5947 */
5948 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5949 struct pci_dev *amd_8132 = NULL;
5950
5951 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5952 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5953 amd_8132))) {
5954 u8 rev;
5955
5956 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5957 if (rev >= 0x10 && rev <= 0x13) {
5958 disable_msi = 1;
5959 pci_dev_put(amd_8132);
5960 break;
5961 }
5962 }
5963 }
5964
Michael Chanb6016b72005-05-26 13:03:09 -07005965 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5966 bp->req_line_speed = 0;
5967 if (bp->phy_flags & PHY_SERDES_FLAG) {
5968 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005969
Michael Chane3648b32005-11-04 08:51:21 -08005970 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005971 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5972 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5973 bp->autoneg = 0;
5974 bp->req_line_speed = bp->line_speed = SPEED_1000;
5975 bp->req_duplex = DUPLEX_FULL;
5976 }
Michael Chanb6016b72005-05-26 13:03:09 -07005977 }
5978 else {
5979 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5980 }
5981
5982 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5983
Michael Chancd339a02005-08-25 15:35:24 -07005984 init_timer(&bp->timer);
5985 bp->timer.expires = RUN_AT(bp->timer_interval);
5986 bp->timer.data = (unsigned long) bp;
5987 bp->timer.function = bnx2_timer;
5988
Michael Chanb6016b72005-05-26 13:03:09 -07005989 return 0;
5990
5991err_out_unmap:
5992 if (bp->regview) {
5993 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005994 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005995 }
5996
5997err_out_release:
5998 pci_release_regions(pdev);
5999
6000err_out_disable:
6001 pci_disable_device(pdev);
6002 pci_set_drvdata(pdev, NULL);
6003
6004err_out:
6005 return rc;
6006}
6007
6008static int __devinit
6009bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6010{
6011 static int version_printed = 0;
6012 struct net_device *dev = NULL;
6013 struct bnx2 *bp;
6014 int rc, i;
6015
6016 if (version_printed++ == 0)
6017 printk(KERN_INFO "%s", version);
6018
6019 /* dev zeroed in init_etherdev */
6020 dev = alloc_etherdev(sizeof(*bp));
6021
6022 if (!dev)
6023 return -ENOMEM;
6024
6025 rc = bnx2_init_board(pdev, dev);
6026 if (rc < 0) {
6027 free_netdev(dev);
6028 return rc;
6029 }
6030
6031 dev->open = bnx2_open;
6032 dev->hard_start_xmit = bnx2_start_xmit;
6033 dev->stop = bnx2_close;
6034 dev->get_stats = bnx2_get_stats;
6035 dev->set_multicast_list = bnx2_set_rx_mode;
6036 dev->do_ioctl = bnx2_ioctl;
6037 dev->set_mac_address = bnx2_change_mac_addr;
6038 dev->change_mtu = bnx2_change_mtu;
6039 dev->tx_timeout = bnx2_tx_timeout;
6040 dev->watchdog_timeo = TX_TIMEOUT;
6041#ifdef BCM_VLAN
6042 dev->vlan_rx_register = bnx2_vlan_rx_register;
6043 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6044#endif
6045 dev->poll = bnx2_poll;
6046 dev->ethtool_ops = &bnx2_ethtool_ops;
6047 dev->weight = 64;
6048
Michael Chan972ec0d2006-01-23 16:12:43 -08006049 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006050
6051#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6052 dev->poll_controller = poll_bnx2;
6053#endif
6054
6055 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006056 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006057 if (bp->regview)
6058 iounmap(bp->regview);
6059 pci_release_regions(pdev);
6060 pci_disable_device(pdev);
6061 pci_set_drvdata(pdev, NULL);
6062 free_netdev(dev);
6063 return rc;
6064 }
6065
6066 pci_set_drvdata(pdev, dev);
6067
6068 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07006069 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07006070 bp->name = board_info[ent->driver_data].name,
6071 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6072 "IRQ %d, ",
6073 dev->name,
6074 bp->name,
6075 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6076 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6077 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6078 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6079 bp->bus_speed_mhz,
6080 dev->base_addr,
6081 bp->pdev->irq);
6082
6083 printk("node addr ");
6084 for (i = 0; i < 6; i++)
6085 printk("%2.2x", dev->dev_addr[i]);
6086 printk("\n");
6087
6088 dev->features |= NETIF_F_SG;
6089 if (bp->flags & USING_DAC_FLAG)
6090 dev->features |= NETIF_F_HIGHDMA;
6091 dev->features |= NETIF_F_IP_CSUM;
6092#ifdef BCM_VLAN
6093 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6094#endif
6095#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006096 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006097#endif
6098
6099 netif_carrier_off(bp->dev);
6100
6101 return 0;
6102}
6103
6104static void __devexit
6105bnx2_remove_one(struct pci_dev *pdev)
6106{
6107 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006108 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006109
Michael Chanafdc08b2005-08-25 15:34:29 -07006110 flush_scheduled_work();
6111
Michael Chanb6016b72005-05-26 13:03:09 -07006112 unregister_netdev(dev);
6113
6114 if (bp->regview)
6115 iounmap(bp->regview);
6116
6117 free_netdev(dev);
6118 pci_release_regions(pdev);
6119 pci_disable_device(pdev);
6120 pci_set_drvdata(pdev, NULL);
6121}
6122
6123static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006124bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006125{
6126 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006127 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006128 u32 reset_code;
6129
6130 if (!netif_running(dev))
6131 return 0;
6132
Michael Chan1d602902006-03-20 17:50:08 -08006133 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006134 bnx2_netif_stop(bp);
6135 netif_device_detach(dev);
6136 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006137 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006138 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006139 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006140 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6141 else
6142 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6143 bnx2_reset_chip(bp, reset_code);
6144 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006145 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006146 return 0;
6147}
6148
/* PM resume hook: inverse of bnx2_suspend().  Powers the chip back to D0,
 * reattaches the netdev, and fully re-initializes the NIC (the suspend
 * path freed all rings, so a complete re-init is required).
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6164
/* PCI driver descriptor: binds the probe/remove and power-management
 * callbacks above to the device IDs in bnx2_pci_tbl. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6173
6174static int __init bnx2_init(void)
6175{
Jeff Garzik29917622006-08-19 17:48:59 -04006176 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006177}
6178
/* Module unload entry point: unregistering the driver triggers
 * bnx2_remove_one() for every bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6183
/* Hook the init/exit functions into the module load/unload machinery. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6186
6187
6188