blob: f296c37f29b67a1894d2b98e7e59758aba073220 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080056#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070057
58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": "
Michael Chanf123bc52006-11-19 14:15:31 -080060#define DRV_MODULE_VERSION "1.5.1"
61#define DRV_MODULE_RELDATE "November 15, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070062
63#define RUN_AT(x) (jiffies + (x))
64
65/* Time in jiffies before concluding the transmitter is hung. */
66#define TX_TIMEOUT (5*HZ)
67
Randy Dunlape19360f2006-04-10 23:22:06 -070068static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070069 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080072MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070073MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
75
76static int disable_msi = 0;
77
78module_param(disable_msi, int, 0);
79MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board index into board_info[] below; set as driver_data in the
 * PCI device table so probe can look up the marketing name.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;
91
92/* indexed by board_t, above */
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable adapter name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
105
/* PCI IDs this driver binds to.  HP subsystem-ID entries must come
 * before the wildcard (PCI_ANY_ID) entries for the same device ID so
 * the more specific board_t is matched first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
125
/* NVRAM/flash device descriptors.  The first field is the strapping
 * value read from the hardware; the next four are controller config
 * words programmed for that device.  Remaining fields: buffered flag,
 * page bits/size, byte address mask, total size, and a name string.
 * "Expansion entry" rows are placeholders for strappings with no
 * known device.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
212
213MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
Michael Chane89bbf12005-08-25 15:36:58 -0700215static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216{
Michael Chan2f8af122006-08-15 01:39:10 -0700217 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700218
Michael Chan2f8af122006-08-15 01:39:10 -0700219 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800220
221 /* The ring uses 256 indices for 255 entries, one of them
222 * needs to be skipped.
223 */
224 diff = bp->tx_prod - bp->tx_cons;
225 if (unlikely(diff >= TX_DESC_CNT)) {
226 diff &= 0xffff;
227 if (diff == TX_DESC_CNT)
228 diff = MAX_TX_DESC_CNT;
229 }
Michael Chane89bbf12005-08-25 15:36:58 -0700230 return (bp->tx_ring_size - diff);
231}
232
/* Indirect register read: latch the target offset into the PCI config
 * window address register, then read the value back through the window.
 * NOTE(review): no locking here — presumably callers serialize access
 * to the shared window register; verify at call sites.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
239
/* Indirect register write: counterpart of bnx2_reg_rd_ind() — set the
 * window address, then write the value through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
246
247static void
248bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
249{
250 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800251 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
252 int i;
253
254 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
255 REG_WR(bp, BNX2_CTX_CTX_CTRL,
256 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
257 for (i = 0; i < 5; i++) {
258 u32 val;
259 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
260 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
261 break;
262 udelay(5);
263 }
264 } else {
265 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
266 REG_WR(bp, BNX2_CTX_DATA, val);
267 }
Michael Chanb6016b72005-05-26 13:03:09 -0700268}
269
/* Read a PHY register over the EMAC MDIO interface.
 *
 * @reg: MII register number
 * @val: out-parameter; receives the 16-bit register value, or 0 on
 *       timeout
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction never
 * completed (up to 50 x 10us polls).  If hardware auto-polling of the
 * PHY is enabled, it is temporarily turned off around the manual
 * access and restored afterwards; the dummy REG_RD after each mode
 * write flushes the posted write before the udelay.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data, then mask it down
			 * to the 16-bit data field.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
326
/* Write a PHY register over the EMAC MDIO interface.
 *
 * @reg: MII register number
 * @val: 16-bit value to write
 *
 * Returns 0 on success, -EBUSY if the transaction did not complete
 * within 50 x 10us polls.  Mirrors bnx2_read_phy(): hardware
 * auto-polling is suspended around the manual access and restored
 * afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore hardware auto-polling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
375
/* Mask chip interrupts.  The read-back flushes the posted write so
 * the mask is guaranteed to have reached the device on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
383
/* Unmask chip interrupts.  The first write acks up to the last seen
 * status index while still masked, the second write unmasks, and the
 * COAL_NOW command forces the host coalescing block to generate an
 * interrupt if events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
396
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable;
 * it is decremented again in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
404
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the
 * watchdog does not fire a spurious tx timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
415
/* Undo bnx2_netif_stop().  Only the call that balances the last
 * outstanding bnx2_disable_int_sync() (intr_sem reaches zero)
 * actually restarts the queue, re-enables polling and unmasks
 * interrupts — nested stop/start pairs stay stopped.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
427
/* Release all DMA and host memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially-allocated state (it is the error-path
 * cleanup for bnx2_alloc_mem); every pointer is NULLed or checked
 * before freeing.  Note status_blk and stats_blk share one DMA
 * allocation, so only status_blk is freed and stats_blk just cleared.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context memory pages (ctx_pages is 0 on other chips). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
466
/* Allocate all host and DMA memory for the device: tx shadow ring
 * (kzalloc) and tx descriptor ring (coherent DMA), rx shadow ring
 * (vmalloc, may be large) and per-page rx descriptor rings, the
 * combined status+statistics block, and on 5709 the context pages.
 *
 * Returns 0 or -ENOMEM; on failure everything already allocated is
 * released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc: the rx shadow ring can span multiple pages. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives cache-aligned after the status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8kB of context memory, split into BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
537
/* Publish the current link state (speed/duplex/autoneg result) to the
 * bootcode via the shared memory LINK_STATUS word, so firmware and
 * management agents see what the driver negotiated.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice — link status is latched. */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
593
/* Log the link state to the console, update the carrier state of the
 * net device, and forward the state to firmware shared memory.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
628
/* Resolve the effective flow-control setting (bp->flow_ctrl) after a
 * link event.  If speed or flow control was forced, the requested
 * setting applies (full duplex only).  Otherwise the result comes
 * from the autoneg pause advertisement exchange: on 5708 SerDes the
 * hardware-resolved status register is used directly; on other PHYs
 * the local/remote advertisements are combined per IEEE 802.3
 * Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Forced mode: honor the requested setting. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708S reports the resolved pause result directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* Translate 1000Base-X pause bits into the common
	 * PAUSE_CAP/PAUSE_ASYM encoding so one resolution table works
	 * for both copper and SerDes.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
704
/* Record link parameters for a 5708 SerDes PHY that has link: decode
 * speed and duplex from the hardware-resolved 1000X_STAT1 register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
733
734static int
735bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700736{
737 u32 bmcr, local_adv, remote_adv, common;
738
739 bp->link_up = 1;
740 bp->line_speed = SPEED_1000;
741
742 bnx2_read_phy(bp, MII_BMCR, &bmcr);
743 if (bmcr & BMCR_FULLDPLX) {
744 bp->duplex = DUPLEX_FULL;
745 }
746 else {
747 bp->duplex = DUPLEX_HALF;
748 }
749
750 if (!(bmcr & BMCR_ANENABLE)) {
751 return 0;
752 }
753
754 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
755 bnx2_read_phy(bp, MII_LPA, &remote_adv);
756
757 common = local_adv & remote_adv;
758 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
759
760 if (common & ADVERTISE_1000XFULL) {
761 bp->duplex = DUPLEX_FULL;
762 }
763 else {
764 bp->duplex = DUPLEX_HALF;
765 }
766 }
767
768 return 0;
769}
770
/* Record link parameters for a copper PHY that has link.  With
 * autoneg enabled the negotiated speed/duplex is resolved from the
 * gigabit registers first (CTRL1000 vs STAT1000, where the partner's
 * bits sit two positions higher), then from the 10/100
 * advertisements; if nothing matches, the link is declared down.
 * With autoneg disabled, BMCR gives the forced settings.  Always
 * returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align partner 1000BT bits with our advertisement. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability — treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
836
/* Program the EMAC to match the resolved link parameters: inter-frame
 * gap, port mode (MII/GMII/25G) by speed, duplex, and rx/tx pause
 * enables.  Finishes by acking the link-change interrupt.  Always
 * returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000HD needs a larger slot time value. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
903
/* Top-level link handling: sample BMSR (twice — latched), determine
 * link up/down, dispatch to the PHY-specific linkup helper, resolve
 * flow control, report changes, and reprogram the MAC.  Loopback
 * modes short-circuit to link-up.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: trust the EMAC link status over BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost with SerDes autoneg: drop any forced 2.5G
		 * setting and make sure autoneg is re-enabled.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
970
/* Soft-reset the PHY via BMCR and poll until the self-clearing RESET
 * bit drops.  Returns 0 on success, -EBUSY if the bit is still set
 * after PHY_RESET_MAX_WAIT polls of 10us.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
994
995static u32
996bnx2_phy_get_pause_adv(struct bnx2 *bp)
997{
998 u32 adv = 0;
999
1000 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1001 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1002
1003 if (bp->phy_flags & PHY_SERDES_FLAG) {
1004 adv = ADVERTISE_1000XPAUSE;
1005 }
1006 else {
1007 adv = ADVERTISE_PAUSE_CAP;
1008 }
1009 }
1010 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1011 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012 adv = ADVERTISE_1000XPSE_ASYM;
1013 }
1014 else {
1015 adv = ADVERTISE_PAUSE_ASYM;
1016 }
1017 }
1018 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1019 if (bp->phy_flags & PHY_SERDES_FLAG) {
1020 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1021 }
1022 else {
1023 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1024 }
1025 }
1026 return adv;
1027}
1028
/* Configure the SerDes PHY from the current settings in *bp.
 *
 * With a forced speed (autoneg off), programs BMCR/advertisement
 * directly, toggling the 2.5G enable bit on the 5708 as needed and
 * bouncing the link so the partner notices the change.  With autoneg
 * on, rewrites the advertisement and restarts autonegotiation only if
 * something changed, then arms a shortened timer to detect partners
 * that do not autonegotiate.  Caller holds bp->phy_lock (it is
 * dropped briefly around the msleep).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Force 2.5G and make sure the 2.5G capability
			 * bit is enabled in the UP1 register.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: disable the capability bit. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path: enable 2.5G capability when supported. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1132
/* Speed masks advertised via ethtool for fibre vs. copper media. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits covering all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1145
/* Configure the copper PHY from the current settings in *bp.
 *
 * With autoneg enabled, rewrites the 10/100 and 1000BASE-T
 * advertisement registers (plus pause bits) and restarts
 * autonegotiation only if something actually changed.  With a forced
 * speed/duplex, writes BMCR directly, bouncing the link first so the
 * partner notices.  Caller holds bp->phy_lock (it is dropped briefly
 * around the msleep).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, keeping only speed and pause bits. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1239
1240static int
1241bnx2_setup_phy(struct bnx2 *bp)
1242{
1243 if (bp->loopback == MAC_LOOPBACK)
1244 return 0;
1245
1246 if (bp->phy_flags & PHY_SERDES_FLAG) {
1247 return (bnx2_setup_serdes_phy(bp));
1248 }
1249 else {
1250 return (bnx2_setup_copper_phy(bp));
1251 }
1252}
1253
/* One-time setup for the 5708 SerDes PHY.
 *
 * Selects IEEE-compatible digital mode, enables fiber mode with
 * auto-detect and parallel-detect, advertises 2.5G when the PHY is
 * capable, raises the TX amplitude on early chip revisions, and
 * applies any TX control value from shared hardware config on
 * backplane designs.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use the IEEE register layout in the DIGITAL3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* TX control override from shared memory config, if any. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1307
/* One-time setup for the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a GP hardware control
 * workaround on the 5706, and programs extended packet length handling
 * in the PHY expansion registers (0x18/0x1c) based on the current MTU.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1342
1343static int
1344bnx2_init_copper_phy(struct bnx2 *bp)
1345{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001346 u32 val;
1347
Michael Chanb6016b72005-05-26 13:03:09 -07001348 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1349
1350 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1351 bnx2_write_phy(bp, 0x18, 0x0c00);
1352 bnx2_write_phy(bp, 0x17, 0x000a);
1353 bnx2_write_phy(bp, 0x15, 0x310b);
1354 bnx2_write_phy(bp, 0x17, 0x201f);
1355 bnx2_write_phy(bp, 0x15, 0x9506);
1356 bnx2_write_phy(bp, 0x17, 0x401f);
1357 bnx2_write_phy(bp, 0x15, 0x14e2);
1358 bnx2_write_phy(bp, 0x18, 0x0400);
1359 }
1360
1361 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001362 /* Set extended packet length bit */
1363 bnx2_write_phy(bp, 0x18, 0x7);
1364 bnx2_read_phy(bp, 0x18, &val);
1365 bnx2_write_phy(bp, 0x18, val | 0x4000);
1366
1367 bnx2_read_phy(bp, 0x10, &val);
1368 bnx2_write_phy(bp, 0x10, val | 0x1);
1369 }
1370 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001371 bnx2_write_phy(bp, 0x18, 0x7);
1372 bnx2_read_phy(bp, 0x18, &val);
1373 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1374
1375 bnx2_read_phy(bp, 0x10, &val);
1376 bnx2_write_phy(bp, 0x10, val & ~0x1);
1377 }
1378
Michael Chan5b0c76a2005-11-04 08:45:49 -08001379 /* ethernet@wirespeed */
1380 bnx2_write_phy(bp, 0x18, 0x7007);
1381 bnx2_read_phy(bp, 0x18, &val);
1382 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001383 return 0;
1384}
1385
1386
/* Initialize the PHY after chip reset.
 *
 * Enables link attentions, resets the PHY, reads the 32-bit PHY ID,
 * runs the chip-specific init routine (5706S/5708S SerDes or copper),
 * then calls bnx2_setup_phy().  Returns the init routine's status.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the PHY id from the two 16-bit ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1419
1420static int
1421bnx2_set_mac_loopback(struct bnx2 *bp)
1422{
1423 u32 mac_mode;
1424
1425 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1426 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1427 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1428 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1429 bp->link_up = 1;
1430 return 0;
1431}
1432
Michael Chanbc5a0692006-01-23 16:13:22 -08001433static int bnx2_test_link(struct bnx2 *);
1434
/* Put the PHY into loopback at 1000/full and configure the MAC to
 * match (GMII, no MAC loopback).  Waits up to ~1 second for the link
 * to come up.  Returns 0 on success or the error from writing BMCR.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the link, up to 10 x 100 ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Force GMII port mode; clear loopback/duplex/forced-link bits. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1464
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and, unless it is a WAIT0 message, wait for the ack.
 *
 * @msg_data: message code/data; a driver sequence number is OR'ed in.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success, -EBUSY on ack timeout (the firmware is also
 * told about the timeout), or -EIO if the firmware reports failure.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require an ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1507
/* Set up the host-memory context table on the 5709.
 *
 * Enables the context block with the configured page size, then writes
 * each context page's DMA address into the host page table, polling
 * until each write request completes.  Returns 0 on success or -EBUSY
 * if a page-table write does not complete.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the page's 64-bit DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write request bit. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1541
/* Zero out the 96 on-chip connection contexts.
 *
 * On 5706 A0 the context IDs are remapped to work around a hardware
 * erratum; otherwise the virtual and physical context addresses are
 * identical.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 erratum: remap CIDs with bit 3 set. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1582
/* Work around bad on-chip RX buffer memory blocks.
 *
 * Allocates every free mbuf from the RX buffer pool, remembers the
 * good ones (bit 9 of the address clear) and frees only those back,
 * permanently withholding the bad blocks so the hardware never uses
 * them.  Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1633
1634static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001635bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001636{
1637 u32 val;
1638 u8 *mac_addr = bp->dev->dev_addr;
1639
1640 val = (mac_addr[0] << 8) | mac_addr[1];
1641
1642 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1643
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001644 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001645 (mac_addr[4] << 8) | mac_addr[5];
1646
1647 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1648}
1649
/* Allocate an skb for RX ring slot @index, DMA-map it, and fill in the
 * corresponding rx_bd.  Advances the producer byte-sequence counter.
 * Returns 0 on success or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1680
1681static void
1682bnx2_phy_int(struct bnx2 *bp)
1683{
1684 u32 new_link_state, old_link_state;
1685
1686 new_link_state = bp->status_blk->status_attn_bits &
1687 STATUS_ATTN_BITS_LINK_STATE;
1688 old_link_state = bp->status_blk->status_attn_bits_ack &
1689 STATUS_ATTN_BITS_LINK_STATE;
1690 if (new_link_state != old_link_state) {
1691 if (new_link_state) {
1692 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1693 STATUS_ATTN_BITS_LINK_STATE);
1694 }
1695 else {
1696 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1697 STATUS_ATTN_BITS_LINK_STATE);
1698 }
1699 bnx2_set_link(bp);
1700 }
1701}
1702
/* Reclaim completed TX descriptors up to the status block's consumer
 * index: unmap the DMA buffers, free the skbs, and wake the TX queue
 * if it was stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip the separator entry at the end of each ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's final BD has not completed. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer for new completions. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the TX lock to avoid racing with xmit. */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1790
/* Recycle an RX skb from ring slot @cons to slot @prod when the frame
 * cannot be passed up the stack (error frame or allocation failure),
 * copying the DMA mapping and descriptor address over when the two
 * slots differ.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the (header-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1820
/* Receive up to @budget packets from the RX ring (NAPI poll path).
 *
 * Frames with hardware-reported errors are recycled in place.  Small
 * frames (when mtu > 1500) are copied into a fresh skb so the original
 * buffer can be reused.  Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the separator entry at the end of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer posted; unmap and pass this
			 * one up whole.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum when no error bits. */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1970
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* The poll routine will read the status block shortly. */
	prefetch(bp->status_blk);

	/* Ack the interrupt and mask further interrupts; bnx2_poll
	 * re-enables them when all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to the NAPI poll routine (bnx2_poll). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1993
/* INTx ISR.  Unlike the MSI ISR, this must detect whether the interrupt
 * actually belongs to this device (the line may be shared).
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further interrupts until the
	 * poll routine finishes.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2023
Michael Chanf4e418f2005-11-04 08:53:48 -08002024static inline int
2025bnx2_has_work(struct bnx2 *bp)
2026{
2027 struct status_block *sblk = bp->status_blk;
2028
2029 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2030 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2031 return 1;
2032
2033 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2034 bp->link_up)
2035 return 1;
2036
2037 return 0;
2038}
2039
/* NAPI poll routine.  Handles link attention events, TX completions and
 * up to *budget RX packets, then re-enables interrupts if no work is
 * pending.  Returns 0 when done (removed from poll list), 1 to stay
 * on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* An unacknowledged link-state attention bit means the PHY
	 * needs servicing.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device quota per poll. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen; the rmb() orders the
	 * status block reads against the bnx2_has_work() check below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: a single unmasking ack is sufficient. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack with interrupts still masked first, then
		 * unmask with a second write.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2101
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the MAC RX mode (promiscuous / all-multi / multicast hash)
 * and the RPM sort-user0 filter to match dev->flags and the current
 * multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when VLAN accel is active
	 * and management firmware (ASF) is not using the device.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit
		 * position (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changes. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort filter (write order
	 * preserved from original code).
	 */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2176
/* Size of the vmalloc'ed scratch buffer that receives uncompressed
 * firmware images (see bnx2_gunzip_init/bnx2_gunzip).
 */
#define FW_BUF_SIZE 0x8000
2178
2179static int
2180bnx2_gunzip_init(struct bnx2 *bp)
2181{
2182 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2183 goto gunzip_nomem1;
2184
2185 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2186 goto gunzip_nomem2;
2187
2188 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2189 if (bp->strm->workspace == NULL)
2190 goto gunzip_nomem3;
2191
2192 return 0;
2193
2194gunzip_nomem3:
2195 kfree(bp->strm);
2196 bp->strm = NULL;
2197
2198gunzip_nomem2:
2199 vfree(bp->gunzip_buf);
2200 bp->gunzip_buf = NULL;
2201
2202gunzip_nomem1:
2203 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2204 "uncompression.\n", bp->dev->name);
2205 return -ENOMEM;
2206}
2207
2208static void
2209bnx2_gunzip_end(struct bnx2 *bp)
2210{
2211 kfree(bp->strm->workspace);
2212
2213 kfree(bp->strm);
2214 bp->strm = NULL;
2215
2216 if (bp->gunzip_buf) {
2217 vfree(bp->gunzip_buf);
2218 bp->gunzip_buf = NULL;
2219 }
2220}
2221
/* Decompress a gzip-wrapped firmware image @zbuf of @len bytes into
 * bp->gunzip_buf.  On success *outbuf points at bp->gunzip_buf and
 * *outlen is the uncompressed length.  Returns 0, -EINVAL on a bad
 * gzip header, or a zlib error code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the optional NUL-terminated file name, if present.
	 * NOTE(review): other optional fields (FEXTRA, FCOMMENT, FHCRC)
	 * are not handled — acceptable only because the embedded
	 * firmware images are generated without them; confirm if the
	 * image generation ever changes.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits => raw deflate stream; the gzip header
	 * was already consumed by hand above.
	 */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2262
/* Download microcode into one of the two RV2P (receive) processors.
 * @rv2p_code holds 64-bit instructions as pairs of 32-bit words;
 * each pair is written to the INSTR_HIGH/INSTR_LOW registers and
 * committed at index i/8 via the per-processor ADDR_CMD register.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* cpu_to_le32 conversion preserved from the original —
		 * the chip expects little-endian instruction words.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2295
Michael Chanaf3ee512006-11-19 14:09:25 -08002296static int
Michael Chanb6016b72005-05-26 13:03:09 -07002297load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2298{
2299 u32 offset;
2300 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002301 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002302
2303 /* Halt the CPU. */
2304 val = REG_RD_IND(bp, cpu_reg->mode);
2305 val |= cpu_reg->mode_value_halt;
2306 REG_WR_IND(bp, cpu_reg->mode, val);
2307 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2308
2309 /* Load the Text area. */
2310 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002311 if (fw->gz_text) {
2312 u32 text_len;
2313 void *text;
2314
2315 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2316 &text_len);
2317 if (rc)
2318 return rc;
2319
2320 fw->text = text;
2321 }
2322 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002323 int j;
2324
2325 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002326 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002327 }
2328 }
2329
2330 /* Load the Data area. */
2331 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2332 if (fw->data) {
2333 int j;
2334
2335 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2336 REG_WR_IND(bp, offset, fw->data[j]);
2337 }
2338 }
2339
2340 /* Load the SBSS area. */
2341 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2342 if (fw->sbss) {
2343 int j;
2344
2345 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2346 REG_WR_IND(bp, offset, fw->sbss[j]);
2347 }
2348 }
2349
2350 /* Load the BSS area. */
2351 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2352 if (fw->bss) {
2353 int j;
2354
2355 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2356 REG_WR_IND(bp, offset, fw->bss[j]);
2357 }
2358 }
2359
2360 /* Load the Read-Only area. */
2361 offset = cpu_reg->spad_base +
2362 (fw->rodata_addr - cpu_reg->mips_view_base);
2363 if (fw->rodata) {
2364 int j;
2365
2366 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2367 REG_WR_IND(bp, offset, fw->rodata[j]);
2368 }
2369 }
2370
2371 /* Clear the pre-fetch instruction. */
2372 REG_WR_IND(bp, cpu_reg->inst, 0);
2373 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2374
2375 /* Start the CPU. */
2376 val = REG_RD_IND(bp, cpu_reg->mode);
2377 val &= ~cpu_reg->mode_value_halt;
2378 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2379 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002380
2381 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002382}
2383
/* Load firmware into all on-chip processors: both RV2P engines plus
 * the RXP, TXP, TPAT, COM and (5709 only) CP CPUs.  The 5709 uses the
 * *_fw_09 images, earlier chips the *_fw_06 images.  Decompression
 * buffers are set up once here and freed on every exit path.
 * Returns 0 or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2528
/* Transition the device between PCI power states D0 and D3hot.
 * For D3hot, optionally configures Wake-on-LAN (magic packet +
 * multicast reception at 10/100 speeds) before cutting power.
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set state to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WOL (magic-packet) EMAC configuration. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Renegotiate the link at WOL-capable speeds
			 * (10/100), preserving the user settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is in effect. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits 3) when WOL
		 * is requested; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2655
2656static int
2657bnx2_acquire_nvram_lock(struct bnx2 *bp)
2658{
2659 u32 val;
2660 int j;
2661
2662 /* Request access to the flash interface. */
2663 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2664 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2665 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2666 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2667 break;
2668
2669 udelay(5);
2670 }
2671
2672 if (j >= NVRAM_TIMEOUT_COUNT)
2673 return -EBUSY;
2674
2675 return 0;
2676}
2677
2678static int
2679bnx2_release_nvram_lock(struct bnx2 *bp)
2680{
2681 int j;
2682 u32 val;
2683
2684 /* Relinquish nvram interface. */
2685 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2686
2687 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2688 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2689 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2690 break;
2691
2692 udelay(5);
2693 }
2694
2695 if (j >= NVRAM_TIMEOUT_COUNT)
2696 return -EBUSY;
2697
2698 return 0;
2699}
2700
2701
2702static int
2703bnx2_enable_nvram_write(struct bnx2 *bp)
2704{
2705 u32 val;
2706
2707 val = REG_RD(bp, BNX2_MISC_CFG);
2708 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2709
2710 if (!bp->flash_info->buffered) {
2711 int j;
2712
2713 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2714 REG_WR(bp, BNX2_NVM_COMMAND,
2715 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2716
2717 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2718 udelay(5);
2719
2720 val = REG_RD(bp, BNX2_NVM_COMMAND);
2721 if (val & BNX2_NVM_COMMAND_DONE)
2722 break;
2723 }
2724
2725 if (j >= NVRAM_TIMEOUT_COUNT)
2726 return -EBUSY;
2727 }
2728 return 0;
2729}
2730
2731static void
2732bnx2_disable_nvram_write(struct bnx2 *bp)
2733{
2734 u32 val;
2735
2736 val = REG_RD(bp, BNX2_MISC_CFG);
2737 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2738}
2739
2740
2741static void
2742bnx2_enable_nvram_access(struct bnx2 *bp)
2743{
2744 u32 val;
2745
2746 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2747 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002748 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002749 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2750}
2751
2752static void
2753bnx2_disable_nvram_access(struct bnx2 *bp)
2754{
2755 u32 val;
2756
2757 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2758 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002759 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002760 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2761 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2762}
2763
/* Erase the flash page containing @offset.  A no-op for buffered
 * flash, which erases transparently on write.  Returns 0 on success,
 * -EBUSY if the command never completes.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2803
/* Read one 32-bit word of NVRAM at @offset into @ret_val (stored
 * big-endian as it appears in flash).  @cmd_flags carries first/last
 * sequencing bits supplied by the caller.  Returns 0 on success,
 * -EBUSY if the command times out (ret_val is then left unwritten).
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts
	 * address by (page << page_bits) + byte-within-page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Preserve the flash byte order in ret_val. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2849
2850
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM
 * at @offset.  @cmd_flags carries first/last sequencing bits supplied
 * by the caller.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts
	 * address by (page << page_bits) + byte-within-page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Convert to the flash (big-endian) byte order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2894
/* Identify the attached flash/EEPROM part by matching the NVM_CFG1
 * strapping against flash_table[], reconfigure the flash interface if
 * the bootcode has not already done so, and record the flash size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or a lock
 * acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised by the bootcode in shared
	 * memory; fall back to the table's total size if unset.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2972
/* Read buf_size bytes of NVRAM starting at byte offset into ret_buf.
 * The hardware transfers whole dwords, so unaligned head and tail
 * bytes are handled by reading the enclosing dword into a scratch
 * buffer and copying out only the requested bytes.  Acquires and
 * releases the NVRAM lock internally.  Returns 0 or a negative errno. */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the full dword containing `offset` and
	 * copy out just the trailing pre_len bytes. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* Entire request fits in this one dword. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a dword; `extra` is how many
	 * trailing bytes of the last dword must be discarded. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		/* Exactly one dword left. */
		u8 buf[4];

		/* cmd_flags nonzero means a FIRST was already issued above,
		 * so this dword is only LAST. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords go straight into ret_buf. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Last dword via scratch buffer so the `extra` pad bytes
		 * never reach the caller's buffer. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3082
3083static int
3084bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3085 int buf_size)
3086{
3087 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07003088 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003089 int rc = 0;
3090 int align_start, align_end;
3091
3092 buf = data_buf;
3093 offset32 = offset;
3094 len32 = buf_size;
3095 align_start = align_end = 0;
3096
3097 if ((align_start = (offset32 & 3))) {
3098 offset32 &= ~3;
3099 len32 += align_start;
3100 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3101 return rc;
3102 }
3103
3104 if (len32 & 3) {
3105 if ((len32 > 4) || !align_start) {
3106 align_end = 4 - (len32 & 3);
3107 len32 += align_end;
3108 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3109 end, 4))) {
3110 return rc;
3111 }
3112 }
3113 }
3114
3115 if (align_start || align_end) {
3116 buf = kmalloc(len32, GFP_KERNEL);
3117 if (buf == 0)
3118 return -ENOMEM;
3119 if (align_start) {
3120 memcpy(buf, start, 4);
3121 }
3122 if (align_end) {
3123 memcpy(buf + len32 - 4, end, 4);
3124 }
3125 memcpy(buf + align_start, data_buf, buf_size);
3126 }
3127
Michael Chanae181bc2006-05-22 16:39:20 -07003128 if (bp->flash_info->buffered == 0) {
3129 flash_buffer = kmalloc(264, GFP_KERNEL);
3130 if (flash_buffer == NULL) {
3131 rc = -ENOMEM;
3132 goto nvram_write_end;
3133 }
3134 }
3135
Michael Chanb6016b72005-05-26 13:03:09 -07003136 written = 0;
3137 while ((written < len32) && (rc == 0)) {
3138 u32 page_start, page_end, data_start, data_end;
3139 u32 addr, cmd_flags;
3140 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003141
3142 /* Find the page_start addr */
3143 page_start = offset32 + written;
3144 page_start -= (page_start % bp->flash_info->page_size);
3145 /* Find the page_end addr */
3146 page_end = page_start + bp->flash_info->page_size;
3147 /* Find the data_start addr */
3148 data_start = (written == 0) ? offset32 : page_start;
3149 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003150 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003151 (offset32 + len32) : page_end;
3152
3153 /* Request access to the flash interface. */
3154 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3155 goto nvram_write_end;
3156
3157 /* Enable access to flash interface */
3158 bnx2_enable_nvram_access(bp);
3159
3160 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3161 if (bp->flash_info->buffered == 0) {
3162 int j;
3163
3164 /* Read the whole page into the buffer
3165 * (non-buffer flash only) */
3166 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3167 if (j == (bp->flash_info->page_size - 4)) {
3168 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3169 }
3170 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003171 page_start + j,
3172 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003173 cmd_flags);
3174
3175 if (rc)
3176 goto nvram_write_end;
3177
3178 cmd_flags = 0;
3179 }
3180 }
3181
3182 /* Enable writes to flash interface (unlock write-protect) */
3183 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3184 goto nvram_write_end;
3185
3186 /* Erase the page */
3187 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3188 goto nvram_write_end;
3189
3190 /* Re-enable the write again for the actual write */
3191 bnx2_enable_nvram_write(bp);
3192
3193 /* Loop to write back the buffer data from page_start to
3194 * data_start */
3195 i = 0;
3196 if (bp->flash_info->buffered == 0) {
3197 for (addr = page_start; addr < data_start;
3198 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003199
Michael Chanb6016b72005-05-26 13:03:09 -07003200 rc = bnx2_nvram_write_dword(bp, addr,
3201 &flash_buffer[i], cmd_flags);
3202
3203 if (rc != 0)
3204 goto nvram_write_end;
3205
3206 cmd_flags = 0;
3207 }
3208 }
3209
3210 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003211 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003212 if ((addr == page_end - 4) ||
3213 ((bp->flash_info->buffered) &&
3214 (addr == data_end - 4))) {
3215
3216 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3217 }
3218 rc = bnx2_nvram_write_dword(bp, addr, buf,
3219 cmd_flags);
3220
3221 if (rc != 0)
3222 goto nvram_write_end;
3223
3224 cmd_flags = 0;
3225 buf += 4;
3226 }
3227
3228 /* Loop to write back the buffer data from data_end
3229 * to page_end */
3230 if (bp->flash_info->buffered == 0) {
3231 for (addr = data_end; addr < page_end;
3232 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003233
Michael Chanb6016b72005-05-26 13:03:09 -07003234 if (addr == page_end-4) {
3235 cmd_flags = BNX2_NVM_COMMAND_LAST;
3236 }
3237 rc = bnx2_nvram_write_dword(bp, addr,
3238 &flash_buffer[i], cmd_flags);
3239
3240 if (rc != 0)
3241 goto nvram_write_end;
3242
3243 cmd_flags = 0;
3244 }
3245 }
3246
3247 /* Disable writes to flash interface (lock write-protect) */
3248 bnx2_disable_nvram_write(bp);
3249
3250 /* Disable access to flash interface */
3251 bnx2_disable_nvram_access(bp);
3252 bnx2_release_nvram_lock(bp);
3253
3254 /* Increment written */
3255 written += data_end - data_start;
3256 }
3257
3258nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003259 if (bp->flash_info->buffered == 0)
3260 kfree(flash_buffer);
3261
Michael Chanb6016b72005-05-26 13:03:09 -07003262 if (align_start || align_end)
3263 kfree(buf);
3264 return rc;
3265}
3266
3267static int
3268bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3269{
3270 u32 val;
3271 int i, rc = 0;
3272
3273 /* Wait for the current PCI transaction to complete before
3274 * issuing a reset. */
3275 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3276 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3278 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3279 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3280 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3281 udelay(5);
3282
Michael Chanb090ae22006-01-23 16:07:10 -08003283 /* Wait for the firmware to tell us it is ok to issue a reset. */
3284 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3285
Michael Chanb6016b72005-05-26 13:03:09 -07003286 /* Deposit a driver reset signature so the firmware knows that
3287 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003288 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003289 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3290
Michael Chanb6016b72005-05-26 13:03:09 -07003291 /* Do a dummy read to force the chip to complete all current transaction
3292 * before we issue a reset. */
3293 val = REG_RD(bp, BNX2_MISC_ID);
3294
Michael Chan234754d2006-11-19 14:11:41 -08003295 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3296 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3297 REG_RD(bp, BNX2_MISC_COMMAND);
3298 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003299
Michael Chan234754d2006-11-19 14:11:41 -08003300 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3301 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003302
Michael Chan234754d2006-11-19 14:11:41 -08003303 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003304
Michael Chan234754d2006-11-19 14:11:41 -08003305 } else {
3306 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3307 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3308 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3309
3310 /* Chip reset. */
3311 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3312
3313 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3314 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3315 current->state = TASK_UNINTERRUPTIBLE;
3316 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003317 }
Michael Chanb6016b72005-05-26 13:03:09 -07003318
Michael Chan234754d2006-11-19 14:11:41 -08003319 /* Reset takes approximate 30 usec */
3320 for (i = 0; i < 10; i++) {
3321 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3322 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3323 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3324 break;
3325 udelay(10);
3326 }
3327
3328 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3329 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3330 printk(KERN_ERR PFX "Chip reset did not complete\n");
3331 return -EBUSY;
3332 }
Michael Chanb6016b72005-05-26 13:03:09 -07003333 }
3334
3335 /* Make sure byte swapping is properly configured. */
3336 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3337 if (val != 0x01020304) {
3338 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3339 return -ENODEV;
3340 }
3341
Michael Chanb6016b72005-05-26 13:03:09 -07003342 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003343 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3344 if (rc)
3345 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003346
3347 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3348 /* Adjust the voltage regular to two steps lower. The default
3349 * of this register is 0x0000000e. */
3350 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3351
3352 /* Remove bad rbuf memory from the free pool. */
3353 rc = bnx2_alloc_bad_rbuf(bp);
3354 }
3355
3356 return rc;
3357}
3358
/* Bring the chip from post-reset state to operational: program DMA
 * byte-swapping, load CPU firmware, identify NVRAM, set the MAC
 * address, configure MQ/RV2P/TBDR page sizes, MTU, host-coalescing
 * parameters, and the RX filter, then hand off to firmware with
 * WAIT2|RESET and enable the engine blocks.  Returns 0 or a negative
 * errno from firmware/CPU init. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine byte/word swap and channel counts; control-path byte
	 * swap only needed on big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic bits — meanings not derivable from this file;
	 * confirm against the register manual. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the relaxed-ordering enable bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks (64-bit split
	 * across two 32-bit registers). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds: interrupt values in the high half,
	 * non-interrupt values in the low half of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 cannot use the timer modes; stats collection only. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether firmware-side management (ASF) is active. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the engine blocks; read back to flush the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3523
Michael Chan59b47d82006-11-19 14:10:45 -08003524static void
3525bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3526{
3527 u32 val, offset0, offset1, offset2, offset3;
3528
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 offset0 = BNX2_L2CTX_TYPE_XI;
3531 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3534 } else {
3535 offset0 = BNX2_L2CTX_TYPE;
3536 offset1 = BNX2_L2CTX_CMD_TYPE;
3537 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3539 }
3540 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3542
3543 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3545
3546 val = (u64) bp->tx_desc_mapping >> 32;
3547 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3548
3549 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551}
Michael Chanb6016b72005-05-26 13:03:09 -07003552
/* Initialize the TX BD ring software state: link the last BD back to
 * the ring base (chain BD), zero the producer/consumer indices, cache
 * the mailbox doorbell addresses, and program the hardware TX context. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last entry is the chain BD pointing back to the start of the
	 * (single-page) ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Mailbox addresses for the BD index / byte-sequence doorbells. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3577
/* Initialize the RX BD rings: compute buffer sizes from the MTU, chain
 * the ring pages together via their last BD, program the RX context
 * with the chain base address, pre-fill the ring with rx skbs, and
 * ring the producer doorbells. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Set every BD's length/flags and link each page's last BD to the
	 * next page (the last page wraps to page 0). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the chain base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate rx skbs; stop early (without error) if allocation
	 * fails, leaving a shorter initial fill. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3637
/* Record the requested RX ring size and derive the number of ring
 * pages: enough pages of MAX_RX_DESC_CNT usable BDs to hold `size`
 * entries, rounded up to a power of 2 (capped at MAX_RX_RINGS), since
 * the ring-index masking requires a power-of-2 page count. */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	/* Count pages needed; a size of 0 still yields one page. */
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	/* max is now the highest set bit of num_rings; double it unless
	 * num_rings was already an exact power of 2. */
	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	/* Highest valid index across all pages (RX_DESC_CNT includes the
	 * per-page chain BD). */
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
3660
/* Release every skb still held in the TX ring: unmap the head DMA
 * mapping and each fragment's page mapping, then free the skb.  Safe
 * to call before the ring is allocated. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* No i++ in the for: the index advances by 1 + nr_frags per skb
	 * (one ring slot per fragment follows the head slot). */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment from its own ring slot. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and all fragment slots. */
		i += j + 1;
	}

}
3697
3698static void
3699bnx2_free_rx_skbs(struct bnx2 *bp)
3700{
3701 int i;
3702
3703 if (bp->rx_buf_ring == NULL)
3704 return;
3705
Michael Chan13daffa2006-03-20 17:49:20 -08003706 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003707 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708 struct sk_buff *skb = rx_buf->skb;
3709
Michael Chan05d0f1c2005-11-04 08:53:48 -08003710 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003711 continue;
3712
3713 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3715
3716 rx_buf->skb = NULL;
3717
Michael Chan745720e2006-06-29 12:37:41 -07003718 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003719 }
3720}
3721
/* Free all skbs held by both the TX and RX rings (TX first, matching
 * the order used elsewhere in teardown). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3728
/* Reset the chip and rebuild the rings.  skbs are freed even when the
 * chip reset fails, so DMA mappings never outlive a failed reset.
 * Returns 0 or the first error from reset/init. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3746
/* Full NIC (re)initialization: reset chip and rings, then bring up the
 * PHY (under phy_lock, as bnx2_init_phy touches shared PHY state) and
 * resolve link.  Returns 0 or a negative errno from the reset path. */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
3761
3762static int
3763bnx2_test_registers(struct bnx2 *bp)
3764{
3765 int ret;
3766 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003767 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003768 u16 offset;
3769 u16 flags;
3770 u32 rw_mask;
3771 u32 ro_mask;
3772 } reg_tbl[] = {
3773 { 0x006c, 0, 0x00000000, 0x0000003f },
3774 { 0x0090, 0, 0xffffffff, 0x00000000 },
3775 { 0x0094, 0, 0x00000000, 0x00000000 },
3776
3777 { 0x0404, 0, 0x00003f00, 0x00000000 },
3778 { 0x0418, 0, 0x00000000, 0xffffffff },
3779 { 0x041c, 0, 0x00000000, 0xffffffff },
3780 { 0x0420, 0, 0x00000000, 0x80ffffff },
3781 { 0x0424, 0, 0x00000000, 0x00000000 },
3782 { 0x0428, 0, 0x00000000, 0x00000001 },
3783 { 0x0450, 0, 0x00000000, 0x0000ffff },
3784 { 0x0454, 0, 0x00000000, 0xffffffff },
3785 { 0x0458, 0, 0x00000000, 0xffffffff },
3786
3787 { 0x0808, 0, 0x00000000, 0xffffffff },
3788 { 0x0854, 0, 0x00000000, 0xffffffff },
3789 { 0x0868, 0, 0x00000000, 0x77777777 },
3790 { 0x086c, 0, 0x00000000, 0x77777777 },
3791 { 0x0870, 0, 0x00000000, 0x77777777 },
3792 { 0x0874, 0, 0x00000000, 0x77777777 },
3793
3794 { 0x0c00, 0, 0x00000000, 0x00000001 },
3795 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3796 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003797
3798 { 0x1000, 0, 0x00000000, 0x00000001 },
3799 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07003800
3801 { 0x1408, 0, 0x01c00800, 0x00000000 },
3802 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3803 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003804 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003805 { 0x14b0, 0, 0x00000002, 0x00000001 },
3806 { 0x14b8, 0, 0x00000000, 0x00000000 },
3807 { 0x14c0, 0, 0x00000000, 0x00000009 },
3808 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3809 { 0x14cc, 0, 0x00000000, 0x00000001 },
3810 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003811
3812 { 0x1800, 0, 0x00000000, 0x00000001 },
3813 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07003814
3815 { 0x2800, 0, 0x00000000, 0x00000001 },
3816 { 0x2804, 0, 0x00000000, 0x00003f01 },
3817 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3818 { 0x2810, 0, 0xffff0000, 0x00000000 },
3819 { 0x2814, 0, 0xffff0000, 0x00000000 },
3820 { 0x2818, 0, 0xffff0000, 0x00000000 },
3821 { 0x281c, 0, 0xffff0000, 0x00000000 },
3822 { 0x2834, 0, 0xffffffff, 0x00000000 },
3823 { 0x2840, 0, 0x00000000, 0xffffffff },
3824 { 0x2844, 0, 0x00000000, 0xffffffff },
3825 { 0x2848, 0, 0xffffffff, 0x00000000 },
3826 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3827
3828 { 0x2c00, 0, 0x00000000, 0x00000011 },
3829 { 0x2c04, 0, 0x00000000, 0x00030007 },
3830
Michael Chanb6016b72005-05-26 13:03:09 -07003831 { 0x3c00, 0, 0x00000000, 0x00000001 },
3832 { 0x3c04, 0, 0x00000000, 0x00070000 },
3833 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3834 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3835 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3836 { 0x3c14, 0, 0x00000000, 0xffffffff },
3837 { 0x3c18, 0, 0x00000000, 0xffffffff },
3838 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3839 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003840
3841 { 0x5004, 0, 0x00000000, 0x0000007f },
3842 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3843 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3844
Michael Chanb6016b72005-05-26 13:03:09 -07003845 { 0x5c00, 0, 0x00000000, 0x00000001 },
3846 { 0x5c04, 0, 0x00000000, 0x0003000f },
3847 { 0x5c08, 0, 0x00000003, 0x00000000 },
3848 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3849 { 0x5c10, 0, 0x00000000, 0xffffffff },
3850 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3851 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3852 { 0x5c88, 0, 0x00000000, 0x00077373 },
3853 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3854
3855 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3856 { 0x680c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6810, 0, 0xffffffff, 0x00000000 },
3858 { 0x6814, 0, 0xffffffff, 0x00000000 },
3859 { 0x6818, 0, 0xffffffff, 0x00000000 },
3860 { 0x681c, 0, 0xffffffff, 0x00000000 },
3861 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3869 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3870 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3871 { 0x684c, 0, 0xffffffff, 0x00000000 },
3872 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3877 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3878
3879 { 0xffff, 0, 0x00000000, 0x00000000 },
3880 };
3881
3882 ret = 0;
3883 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3884 u32 offset, rw_mask, ro_mask, save_val, val;
3885
3886 offset = (u32) reg_tbl[i].offset;
3887 rw_mask = reg_tbl[i].rw_mask;
3888 ro_mask = reg_tbl[i].ro_mask;
3889
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003890 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003891
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003892 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003893
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003894 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003895 if ((val & rw_mask) != 0) {
3896 goto reg_test_err;
3897 }
3898
3899 if ((val & ro_mask) != (save_val & ro_mask)) {
3900 goto reg_test_err;
3901 }
3902
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003903 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003904
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003905 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003906 if ((val & rw_mask) != rw_mask) {
3907 goto reg_test_err;
3908 }
3909
3910 if ((val & ro_mask) != (save_val & ro_mask)) {
3911 goto reg_test_err;
3912 }
3913
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003914 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003915 continue;
3916
3917reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003918 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003919 ret = -ENODEV;
3920 break;
3921 }
3922 return ret;
3923}
3924
3925static int
3926bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3927{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003928 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003929 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3930 int i;
3931
3932 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3933 u32 offset;
3934
3935 for (offset = 0; offset < size; offset += 4) {
3936
3937 REG_WR_IND(bp, start + offset, test_pattern[i]);
3938
3939 if (REG_RD_IND(bp, start + offset) !=
3940 test_pattern[i]) {
3941 return -ENODEV;
3942 }
3943 }
3944 }
3945 return 0;
3946}
3947
3948static int
3949bnx2_test_memory(struct bnx2 *bp)
3950{
3951 int ret = 0;
3952 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003953 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003954 u32 offset;
3955 u32 len;
3956 } mem_tbl[] = {
3957 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003958 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003959 { 0xe0000, 0x4000 },
3960 { 0x120000, 0x4000 },
3961 { 0x1a0000, 0x4000 },
3962 { 0x160000, 0x4000 },
3963 { 0xffffffff, 0 },
3964 };
3965
3966 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3967 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3968 mem_tbl[i].len)) != 0) {
3969 return ret;
3970 }
3971 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003972
Michael Chanb6016b72005-05-26 13:03:09 -07003973 return ret;
3974}
3975
Michael Chanbc5a0692006-01-23 16:13:22 -08003976#define BNX2_MAC_LOOPBACK 0
3977#define BNX2_PHY_LOOPBACK 1
3978
/* Send one self-addressed frame through the requested loopback path
 * (MAC-internal or PHY) and verify it comes back intact on the RX ring.
 *
 * @bp:            device context
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK
 *
 * Returns 0 if the frame is received and matches byte-for-byte,
 * -EINVAL for an unknown mode, -ENOMEM if the skb cannot be allocated,
 * and -ENODEV for any TX/RX/verification failure.
 *
 * NOTE(review): must be called with the NIC quiesced (no concurrent
 * traffic) since it manipulates bp->tx_prod and reads the status block
 * directly — confirm against callers (bnx2_test_loopback).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	/* Select and program the loopback path before queuing the frame. */
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size (non-jumbo) test frame: our own MAC as DA,
	 * zeroed SA/type, then a counting byte pattern we can verify.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing cycle (no interrupt) so the status block
	 * gives us a fresh RX consumer index to compare against.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand-build a single TX descriptor for the test frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell (producer index + byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	/* Second forced coalescing cycle to latch the TX/RX indices. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed exactly the one frame we queued. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* RX must have advanced by exactly num_pkts (== 1). */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr the chip wrote precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive-error flag fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;	/* restore normal (non-loopback) operation */
	return ret;
}
4097
Michael Chanbc5a0692006-01-23 16:13:22 -08004098#define BNX2_MAC_LOOPBACK_FAILED 1
4099#define BNX2_PHY_LOOPBACK_FAILED 2
4100#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4101 BNX2_PHY_LOOPBACK_FAILED)
4102
4103static int
4104bnx2_test_loopback(struct bnx2 *bp)
4105{
4106 int rc = 0;
4107
4108 if (!netif_running(bp->dev))
4109 return BNX2_LOOPBACK_FAILED;
4110
4111 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4112 spin_lock_bh(&bp->phy_lock);
4113 bnx2_init_phy(bp);
4114 spin_unlock_bh(&bp->phy_lock);
4115 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4116 rc |= BNX2_MAC_LOOPBACK_FAILED;
4117 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4118 rc |= BNX2_PHY_LOOPBACK_FAILED;
4119 return rc;
4120}
4121
Michael Chanb6016b72005-05-26 13:03:09 -07004122#define NVRAM_SIZE 0x200
4123#define CRC32_RESIDUAL 0xdebb20e3
4124
4125static int
4126bnx2_test_nvram(struct bnx2 *bp)
4127{
4128 u32 buf[NVRAM_SIZE / 4];
4129 u8 *data = (u8 *) buf;
4130 int rc = 0;
4131 u32 magic, csum;
4132
4133 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4134 goto test_nvram_done;
4135
4136 magic = be32_to_cpu(buf[0]);
4137 if (magic != 0x669955aa) {
4138 rc = -ENODEV;
4139 goto test_nvram_done;
4140 }
4141
4142 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4143 goto test_nvram_done;
4144
4145 csum = ether_crc_le(0x100, data);
4146 if (csum != CRC32_RESIDUAL) {
4147 rc = -ENODEV;
4148 goto test_nvram_done;
4149 }
4150
4151 csum = ether_crc_le(0x100, data + 0x100);
4152 if (csum != CRC32_RESIDUAL) {
4153 rc = -ENODEV;
4154 }
4155
4156test_nvram_done:
4157 return rc;
4158}
4159
4160static int
4161bnx2_test_link(struct bnx2 *bp)
4162{
4163 u32 bmsr;
4164
Michael Chanc770a652005-08-25 15:38:39 -07004165 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004168 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004169
Michael Chanb6016b72005-05-26 13:03:09 -07004170 if (bmsr & BMSR_LSTATUS) {
4171 return 0;
4172 }
4173 return -ENODEV;
4174}
4175
4176static int
4177bnx2_test_intr(struct bnx2 *bp)
4178{
4179 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004180 u16 status_idx;
4181
4182 if (!netif_running(bp->dev))
4183 return -ENODEV;
4184
4185 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4186
4187 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004188 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004189 REG_RD(bp, BNX2_HC_COMMAND);
4190
4191 for (i = 0; i < 10; i++) {
4192 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4193 status_idx) {
4194
4195 break;
4196 }
4197
4198 msleep_interruptible(10);
4199 }
4200 if (i < 10)
4201 return 0;
4202
4203 return -ENODEV;
4204}
4205
/* Periodic (timer-context) SerDes state machine for the 5706: implements
 * parallel detection.  If autoneg has not brought the link up but the
 * optics report signal with no autoneg CONFIG words from the partner,
 * force 1000/full; once a partner later starts sending CONFIG, restore
 * autoneg.  Takes bp->phy_lock (non-BH variant — timer context).
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	/* Back off while a recently-restarted autoneg is still settling. */
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Shadow-register access: select page via 0x1c,
			 * then read signal-detect status.
			 * NOTE(review): register numbers/bits are
			 * chip-specific magic — confirm against the
			 * 5706 PHY programming guide.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			/* Read expansion register 0x15 twice via selector
			 * 0x17; the double read is deliberate (presumably
			 * to clear a latched value — TODO confirm).
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode; if the partner now sends
		 * CONFIG words, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4260
/* Periodic (timer-context) SerDes handling for the 5708: while the link
 * is down with autoneg requested, alternate between forcing 2.5G/full
 * and re-enabling autoneg, so a non-negotiating 2.5G partner can still
 * link up.  Only applies to 2.5G-capable PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Non-2.5G-capable PHYs need none of this alternation. */
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	/* Let a recently-restarted autoneg run for a few ticks first. */
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is not linking: try forced 2.5G/full,
			 * and poll again sooner than the normal interval.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either: go back to
			 * autoneg and give it two timer ticks to settle.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4295
4296static void
Michael Chanb6016b72005-05-26 13:03:09 -07004297bnx2_timer(unsigned long data)
4298{
4299 struct bnx2 *bp = (struct bnx2 *) data;
4300 u32 msg;
4301
Michael Chancd339a02005-08-25 15:35:24 -07004302 if (!netif_running(bp->dev))
4303 return;
4304
Michael Chanb6016b72005-05-26 13:03:09 -07004305 if (atomic_read(&bp->intr_sem) != 0)
4306 goto bnx2_restart_timer;
4307
4308 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004309 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004310
Michael Chancea94db2006-06-12 22:16:13 -07004311 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4312
Michael Chanf8dd0642006-11-19 14:08:29 -08004313 if (bp->phy_flags & PHY_SERDES_FLAG) {
4314 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4315 bnx2_5706_serdes_timer(bp);
4316 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4317 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004318 }
4319
4320bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004321 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004322}
4323
4324/* Called with rtnl_lock */
/* net_device open handler (called with rtnl_lock held): power up the
 * chip, allocate rings, request the IRQ (preferring MSI on chips that
 * support it), initialize the NIC, and verify MSI actually delivers
 * interrupts — falling back to INTx if it does not.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources (memory, IRQ, timer) are released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* 5706 A0/A1 do not get MSI (presumably a chip erratum —
	 * TODO confirm); it can also be disabled by module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			/* MSI enable failed: fall back to shared INTx. */
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind everything acquired so far. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI, re-init the NIC, and retry with
			 * a shared INTx line.
			 */
			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4419
/* Workqueue handler (scheduled from bnx2_tx_timeout) that resets and
 * re-initializes the NIC.  bp->in_reset_task is set around the body so
 * bnx2_close() can wait for this work to finish instead of calling
 * flush_scheduled_work() (which could deadlock on rtnl_lock — see the
 * comment in bnx2_close).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Block the heartbeat work in bnx2_timer() until the first
	 * interrupt re-arms things (intr_sem decremented elsewhere).
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4437
4438static void
4439bnx2_tx_timeout(struct net_device *dev)
4440{
Michael Chan972ec0d2006-01-23 16:12:43 -08004441 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004442
4443 /* This allows the netif to be shutdown gracefully before resetting */
4444 schedule_work(&bp->reset_task);
4445}
4446
4447#ifdef BCM_VLAN
4448/* Called with rtnl_lock */
4449static void
4450bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4451{
Michael Chan972ec0d2006-01-23 16:12:43 -08004452 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004453
4454 bnx2_netif_stop(bp);
4455
4456 bp->vlgrp = vlgrp;
4457 bnx2_set_rx_mode(dev);
4458
4459 bnx2_netif_start(bp);
4460}
4461
4462/* Called with rtnl_lock */
4463static void
4464bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4465{
Michael Chan972ec0d2006-01-23 16:12:43 -08004466 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004467
4468 bnx2_netif_stop(bp);
4469
4470 if (bp->vlgrp)
4471 bp->vlgrp->vlan_devices[vid] = NULL;
4472 bnx2_set_rx_mode(dev);
4473
4474 bnx2_netif_start(bp);
4475}
4476#endif
4477
Herbert Xu932ff272006-06-09 12:20:56 -07004478/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004479 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4480 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004481 */
4482static int
4483bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4484{
Michael Chan972ec0d2006-01-23 16:12:43 -08004485 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004486 dma_addr_t mapping;
4487 struct tx_bd *txbd;
4488 struct sw_bd *tx_buf;
4489 u32 len, vlan_tag_flags, last_frag, mss;
4490 u16 prod, ring_prod;
4491 int i;
4492
Michael Chane89bbf12005-08-25 15:36:58 -07004493 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004494 netif_stop_queue(dev);
4495 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4496 dev->name);
4497
4498 return NETDEV_TX_BUSY;
4499 }
4500 len = skb_headlen(skb);
4501 prod = bp->tx_prod;
4502 ring_prod = TX_RING_IDX(prod);
4503
4504 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004505 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004506 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4507 }
4508
4509 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4510 vlan_tag_flags |=
4511 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4512 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004513#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004514 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004515 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4516 u32 tcp_opt_len, ip_tcp_len;
4517
4518 if (skb_header_cloned(skb) &&
4519 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4520 dev_kfree_skb(skb);
4521 return NETDEV_TX_OK;
4522 }
4523
4524 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4525 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4526
4527 tcp_opt_len = 0;
4528 if (skb->h.th->doff > 5) {
4529 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4530 }
4531 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4532
4533 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004534 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004535 skb->h.th->check =
4536 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4537 skb->nh.iph->daddr,
4538 0, IPPROTO_TCP, 0);
4539
4540 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4541 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4542 (tcp_opt_len >> 2)) << 8;
4543 }
4544 }
4545 else
4546#endif
4547 {
4548 mss = 0;
4549 }
4550
4551 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004552
Michael Chanb6016b72005-05-26 13:03:09 -07004553 tx_buf = &bp->tx_buf_ring[ring_prod];
4554 tx_buf->skb = skb;
4555 pci_unmap_addr_set(tx_buf, mapping, mapping);
4556
4557 txbd = &bp->tx_desc_ring[ring_prod];
4558
4559 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4560 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4561 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4562 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4563
4564 last_frag = skb_shinfo(skb)->nr_frags;
4565
4566 for (i = 0; i < last_frag; i++) {
4567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4568
4569 prod = NEXT_TX_BD(prod);
4570 ring_prod = TX_RING_IDX(prod);
4571 txbd = &bp->tx_desc_ring[ring_prod];
4572
4573 len = frag->size;
4574 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4575 len, PCI_DMA_TODEVICE);
4576 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4577 mapping, mapping);
4578
4579 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4580 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4581 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4582 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4583
4584 }
4585 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4586
4587 prod = NEXT_TX_BD(prod);
4588 bp->tx_prod_bseq += skb->len;
4589
Michael Chan234754d2006-11-19 14:11:41 -08004590 REG_WR16(bp, bp->tx_bidx_addr, prod);
4591 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004592
4593 mmiowb();
4594
4595 bp->tx_prod = prod;
4596 dev->trans_start = jiffies;
4597
Michael Chane89bbf12005-08-25 15:36:58 -07004598 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004599 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004600 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004601 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004602 }
4603
4604 return NETDEV_TX_OK;
4605}
4606
4607/* Called with rtnl_lock */
4608static int
4609bnx2_close(struct net_device *dev)
4610{
Michael Chan972ec0d2006-01-23 16:12:43 -08004611 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004612 u32 reset_code;
4613
Michael Chanafdc08b2005-08-25 15:34:29 -07004614 /* Calling flush_scheduled_work() may deadlock because
4615 * linkwatch_event() may be on the workqueue and it will try to get
4616 * the rtnl_lock which we are holding.
4617 */
4618 while (bp->in_reset_task)
4619 msleep(1);
4620
Michael Chanb6016b72005-05-26 13:03:09 -07004621 bnx2_netif_stop(bp);
4622 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004623 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004624 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004625 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004626 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4627 else
4628 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4629 bnx2_reset_chip(bp, reset_code);
4630 free_irq(bp->pdev->irq, dev);
4631 if (bp->flags & USING_MSI_FLAG) {
4632 pci_disable_msi(bp->pdev);
4633 bp->flags &= ~USING_MSI_FLAG;
4634 }
4635 bnx2_free_skbs(bp);
4636 bnx2_free_mem(bp);
4637 bp->link_up = 0;
4638 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004639 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004640 return 0;
4641}
4642
4643#define GET_NET_STATS64(ctr) \
4644 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4645 (unsigned long) (ctr##_lo)
4646
4647#define GET_NET_STATS32(ctr) \
4648 (ctr##_lo)
4649
4650#if (BITS_PER_LONG == 64)
4651#define GET_NET_STATS GET_NET_STATS64
4652#else
4653#define GET_NET_STATS GET_NET_STATS32
4654#endif
4655
4656static struct net_device_stats *
4657bnx2_get_stats(struct net_device *dev)
4658{
Michael Chan972ec0d2006-01-23 16:12:43 -08004659 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004660 struct statistics_block *stats_blk = bp->stats_blk;
4661 struct net_device_stats *net_stats = &bp->net_stats;
4662
4663 if (bp->stats_blk == NULL) {
4664 return net_stats;
4665 }
4666 net_stats->rx_packets =
4667 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4668 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4670
4671 net_stats->tx_packets =
4672 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4673 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4674 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4675
4676 net_stats->rx_bytes =
4677 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4678
4679 net_stats->tx_bytes =
4680 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4681
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004682 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004683 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4684
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004685 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004686 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4687
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004688 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004689 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4690 stats_blk->stat_EtherStatsOverrsizePkts);
4691
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004692 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004693 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4694
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004695 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004696 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4697
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004698 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004699 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4700
4701 net_stats->rx_errors = net_stats->rx_length_errors +
4702 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4703 net_stats->rx_crc_errors;
4704
4705 net_stats->tx_aborted_errors =
4706 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4707 stats_blk->stat_Dot3StatsLateCollisions);
4708
Michael Chan5b0c76a2005-11-04 08:45:49 -08004709 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4710 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07004711 net_stats->tx_carrier_errors = 0;
4712 else {
4713 net_stats->tx_carrier_errors =
4714 (unsigned long)
4715 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4716 }
4717
4718 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004719 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07004720 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4721 +
4722 net_stats->tx_aborted_errors +
4723 net_stats->tx_carrier_errors;
4724
Michael Chancea94db2006-06-12 22:16:13 -07004725 net_stats->rx_missed_errors =
4726 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4727 stats_blk->stat_FwRxDrop);
4728
Michael Chanb6016b72005-05-26 13:03:09 -07004729 return net_stats;
4730}
4731
4732/* All ethtool functions called with rtnl_lock */
4733
4734static int
4735bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4736{
Michael Chan972ec0d2006-01-23 16:12:43 -08004737 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004738
4739 cmd->supported = SUPPORTED_Autoneg;
4740 if (bp->phy_flags & PHY_SERDES_FLAG) {
4741 cmd->supported |= SUPPORTED_1000baseT_Full |
4742 SUPPORTED_FIBRE;
4743
4744 cmd->port = PORT_FIBRE;
4745 }
4746 else {
4747 cmd->supported |= SUPPORTED_10baseT_Half |
4748 SUPPORTED_10baseT_Full |
4749 SUPPORTED_100baseT_Half |
4750 SUPPORTED_100baseT_Full |
4751 SUPPORTED_1000baseT_Full |
4752 SUPPORTED_TP;
4753
4754 cmd->port = PORT_TP;
4755 }
4756
4757 cmd->advertising = bp->advertising;
4758
4759 if (bp->autoneg & AUTONEG_SPEED) {
4760 cmd->autoneg = AUTONEG_ENABLE;
4761 }
4762 else {
4763 cmd->autoneg = AUTONEG_DISABLE;
4764 }
4765
4766 if (netif_carrier_ok(dev)) {
4767 cmd->speed = bp->line_speed;
4768 cmd->duplex = bp->duplex;
4769 }
4770 else {
4771 cmd->speed = -1;
4772 cmd->duplex = -1;
4773 }
4774
4775 cmd->transceiver = XCVR_INTERNAL;
4776 cmd->phy_address = bp->phy_addr;
4777
4778 return 0;
4779}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004780
/* Ethtool set_settings handler (called with rtnl_lock): validate the
 * requested autoneg/speed/duplex combination against the PHY type
 * (copper vs. SerDes fiber), then commit it and reprogram the PHY.
 *
 * Returns 0 on success, -EINVAL for combinations the hardware cannot
 * do (e.g. 10/100 on SerDes, forced half-duplex gigabit, 2.5G on a
 * non-2.5G-capable PHY).
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Stage the new values locally so nothing is committed until
	 * all validation has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes do not exist on SerdDes/fiber. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* Gigabit half-duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: SerDes only allows 1000/full or (on
		 * capable PHYs) 2500/full; copper disallows forced 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and apply under the PHY lock. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4856
/* ethtool -i handler: fill in driver name, version, PCI bus info and
 * firmware version string. */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	/* bp->fw_ver packs one version component per byte; format the top
	 * three bytes as "X.Y.Z".  NOTE(review): the '+ '0'' conversion
	 * only produces a printable digit for component values 0-9 --
	 * confirm firmware version components never exceed 9. */
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
4871
/* Size of the register dump produced by bnx2_get_regs() (32KB of chip
 * register space). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* Fixed-size dump regardless of chip variant. */
	return BNX2_REGDUMP_LEN;
}
4879
4880static void
4881bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4882{
4883 u32 *p = _p, i, offset;
4884 u8 *orig_p = _p;
4885 struct bnx2 *bp = netdev_priv(dev);
4886 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4887 0x0800, 0x0880, 0x0c00, 0x0c10,
4888 0x0c30, 0x0d08, 0x1000, 0x101c,
4889 0x1040, 0x1048, 0x1080, 0x10a4,
4890 0x1400, 0x1490, 0x1498, 0x14f0,
4891 0x1500, 0x155c, 0x1580, 0x15dc,
4892 0x1600, 0x1658, 0x1680, 0x16d8,
4893 0x1800, 0x1820, 0x1840, 0x1854,
4894 0x1880, 0x1894, 0x1900, 0x1984,
4895 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4896 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4897 0x2000, 0x2030, 0x23c0, 0x2400,
4898 0x2800, 0x2820, 0x2830, 0x2850,
4899 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4900 0x3c00, 0x3c94, 0x4000, 0x4010,
4901 0x4080, 0x4090, 0x43c0, 0x4458,
4902 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4903 0x4fc0, 0x5010, 0x53c0, 0x5444,
4904 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4905 0x5fc0, 0x6000, 0x6400, 0x6428,
4906 0x6800, 0x6848, 0x684c, 0x6860,
4907 0x6888, 0x6910, 0x8000 };
4908
4909 regs->version = 0;
4910
4911 memset(p, 0, BNX2_REGDUMP_LEN);
4912
4913 if (!netif_running(bp->dev))
4914 return;
4915
4916 i = 0;
4917 offset = reg_boundaries[0];
4918 p += offset;
4919 while (offset < BNX2_REGDUMP_LEN) {
4920 *p++ = REG_RD(bp, offset);
4921 offset += 4;
4922 if (offset == reg_boundaries[i + 1]) {
4923 offset = reg_boundaries[i + 2];
4924 p = (u32 *) (orig_p + offset);
4925 i += 2;
4926 }
4927 }
4928}
4929
Michael Chanb6016b72005-05-26 13:03:09 -07004930static void
4931bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4932{
Michael Chan972ec0d2006-01-23 16:12:43 -08004933 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004934
4935 if (bp->flags & NO_WOL_FLAG) {
4936 wol->supported = 0;
4937 wol->wolopts = 0;
4938 }
4939 else {
4940 wol->supported = WAKE_MAGIC;
4941 if (bp->wol)
4942 wol->wolopts = WAKE_MAGIC;
4943 else
4944 wol->wolopts = 0;
4945 }
4946 memset(&wol->sopass, 0, sizeof(wol->sopass));
4947}
4948
4949static int
4950bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4951{
Michael Chan972ec0d2006-01-23 16:12:43 -08004952 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004953
4954 if (wol->wolopts & ~WAKE_MAGIC)
4955 return -EINVAL;
4956
4957 if (wol->wolopts & WAKE_MAGIC) {
4958 if (bp->flags & NO_WOL_FLAG)
4959 return -EINVAL;
4960
4961 bp->wol = 1;
4962 }
4963 else {
4964 bp->wol = 0;
4965 }
4966 return 0;
4967}
4968
/* ethtool -r handler: restart link autonegotiation.  Fails unless
 * autonegotiation is currently enabled. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the spinlock before sleeping. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the driver timer's SerDes autoneg timeout so an
		 * unanswered negotiation can be handled later. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback (if set above) and kick off a fresh autoneg. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5003
5004static int
5005bnx2_get_eeprom_len(struct net_device *dev)
5006{
Michael Chan972ec0d2006-01-23 16:12:43 -08005007 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005008
Michael Chan1122db72006-01-23 16:11:42 -08005009 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005010 return 0;
5011
Michael Chan1122db72006-01-23 16:11:42 -08005012 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005013}
5014
5015static int
5016bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5017 u8 *eebuf)
5018{
Michael Chan972ec0d2006-01-23 16:12:43 -08005019 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005020 int rc;
5021
John W. Linville1064e942005-11-10 12:58:24 -08005022 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005023
5024 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5025
5026 return rc;
5027}
5028
5029static int
5030bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5031 u8 *eebuf)
5032{
Michael Chan972ec0d2006-01-23 16:12:43 -08005033 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005034 int rc;
5035
John W. Linville1064e942005-11-10 12:58:24 -08005036 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005037
5038 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5039
5040 return rc;
5041}
5042
5043static int
5044bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5045{
Michael Chan972ec0d2006-01-23 16:12:43 -08005046 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005047
5048 memset(coal, 0, sizeof(struct ethtool_coalesce));
5049
5050 coal->rx_coalesce_usecs = bp->rx_ticks;
5051 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5052 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5053 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5054
5055 coal->tx_coalesce_usecs = bp->tx_ticks;
5056 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5057 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5058 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5059
5060 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5061
5062 return 0;
5063}
5064
/* ethtool -C handler: apply interrupt coalescing parameters, silently
 * clamping each value to the hardware field width (tick counters are
 * 10-bit, max 0x3ff; frame counters are 8-bit, max 0xff).  The NIC is
 * restarted for the new values to take effect. */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* The stats interval is clamped to 0xffff00 and rounded down to a
	 * multiple of 256. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Restart the NIC so the new coalescing values are programmed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5108
5109static void
5110bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5111{
Michael Chan972ec0d2006-01-23 16:12:43 -08005112 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005113
Michael Chan13daffa2006-03-20 17:49:20 -08005114 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005115 ering->rx_mini_max_pending = 0;
5116 ering->rx_jumbo_max_pending = 0;
5117
5118 ering->rx_pending = bp->rx_ring_size;
5119 ering->rx_mini_pending = 0;
5120 ering->rx_jumbo_pending = 0;
5121
5122 ering->tx_max_pending = MAX_TX_DESC_CNT;
5123 ering->tx_pending = bp->tx_ring_size;
5124}
5125
/* ethtool -G handler: resize the RX/TX rings.  The TX ring must hold
 * more than MAX_SKB_FRAGS descriptors so one maximally-fragmented skb
 * always fits.  A running interface is torn down, its ring memory
 * reallocated, and brought back up. */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the interface is
		 * left stopped with its memory freed -- confirm callers
		 * tolerate this degraded state. */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5159
5160static void
5161bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5162{
Michael Chan972ec0d2006-01-23 16:12:43 -08005163 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005164
5165 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5166 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5167 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5168}
5169
5170static int
5171bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5172{
Michael Chan972ec0d2006-01-23 16:12:43 -08005173 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005174
5175 bp->req_flow_ctrl = 0;
5176 if (epause->rx_pause)
5177 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5178 if (epause->tx_pause)
5179 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5180
5181 if (epause->autoneg) {
5182 bp->autoneg |= AUTONEG_FLOW_CTRL;
5183 }
5184 else {
5185 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5186 }
5187
Michael Chanc770a652005-08-25 15:38:39 -07005188 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005189
5190 bnx2_setup_phy(bp);
5191
Michael Chanc770a652005-08-25 15:38:39 -07005192 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005193
5194 return 0;
5195}
5196
5197static u32
5198bnx2_get_rx_csum(struct net_device *dev)
5199{
Michael Chan972ec0d2006-01-23 16:12:43 -08005200 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005201
5202 return bp->rx_csum;
5203}
5204
5205static int
5206bnx2_set_rx_csum(struct net_device *dev, u32 data)
5207{
Michael Chan972ec0d2006-01-23 16:12:43 -08005208 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005209
5210 bp->rx_csum = data;
5211 return 0;
5212}
5213
Michael Chanb11d6212006-06-29 12:31:21 -07005214static int
5215bnx2_set_tso(struct net_device *dev, u32 data)
5216{
5217 if (data)
5218 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5219 else
5220 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5221 return 0;
5222}
5223
/* Number of ethtool statistics.  The three tables below (names,
 * hardware offsets, counter widths) are index-matched and must all
 * have exactly BNX2_NUM_STATS entries. */
#define BNX2_NUM_STATS 46

/* ethtool -S statistic names, index-matched to bnx2_stats_offset_arr
 * and the per-chip width tables. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5276
/* Convert a statistics_block field offset (bytes) to a u32 word index
 * into the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets of each counter in the hardware statistics block,
 * index-matched to bnx2_stats_str_arr.  64-bit counters point at
 * their _hi word; bnx2_get_ethtool_stats() reads the _lo word at
 * offset + 1. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5327
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes for 5706 A0-A2 and 5708 A0:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skip (reported as 0). */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same table for later chip revisions, where the carrier-sense counter
 * (index 11) is usable (4 instead of 0). */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5346
/* Number of ethtool self-tests; indices match the buf[] slots filled
 * in by bnx2_self_test(). */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names, in result-slot order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5359
5360static int
5361bnx2_self_test_count(struct net_device *dev)
5362{
5363 return BNX2_NUM_TESTS;
5364}
5365
/* ethtool self-test handler.  Offline tests (register, memory,
 * loopback) require taking the device down and resetting the chip in
 * diagnostic mode; online tests (nvram, interrupt, link) always run.
 * Each buf[i] slot is nonzero on failure of test i (see
 * bnx2_tests_str_arr for the ordering). */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the hardware. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback failure value itself. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Bring the chip back: full reset if the interface is
		 * down, otherwise reinitialize and restart it. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5421
5422static void
5423bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5424{
5425 switch (stringset) {
5426 case ETH_SS_STATS:
5427 memcpy(buf, bnx2_stats_str_arr,
5428 sizeof(bnx2_stats_str_arr));
5429 break;
5430 case ETH_SS_TEST:
5431 memcpy(buf, bnx2_tests_str_arr,
5432 sizeof(bnx2_tests_str_arr));
5433 break;
5434 }
5435}
5436
5437static int
5438bnx2_get_stats_count(struct net_device *dev)
5439{
5440 return BNX2_NUM_STATS;
5441}
5442
/* ethtool -S handler: copy hardware counters from the DMA statistics
 * block into buf, widening each to u64.  The per-chip width table
 * distinguishes 64-bit counters (read _hi then _lo word), 32-bit
 * counters, and counters skipped because of chip errata. */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 revisions use the errata width table. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5483
/* ethtool -p handler: blink the port LEDs for `data` seconds (default
 * 2 when 0 is passed) so the physical port can be identified. */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take direct MAC control of the LEDs, saving the old mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle every 500ms: even iterations force all LEDs off (bare
	 * override), odd iterations force them all on. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupted the sleep. */
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5517
/* ethtool operations table registered in dev->ethtool_ops; generic
 * ethtool_op_* helpers are used where no chip-specific handling is
 * needed. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5555
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  PHY
 * register accesses are serialized with the rest of the driver via
 * phy_lock; register writes require CAP_NET_ADMIN. */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5597
5598/* Called with rtnl_lock */
5599static int
5600bnx2_change_mac_addr(struct net_device *dev, void *p)
5601{
5602 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005603 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005604
Michael Chan73eef4c2005-08-25 15:39:15 -07005605 if (!is_valid_ether_addr(addr->sa_data))
5606 return -EINVAL;
5607
Michael Chanb6016b72005-05-26 13:03:09 -07005608 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5609 if (netif_running(dev))
5610 bnx2_set_mac_addr(bp);
5611
5612 return 0;
5613}
5614
5615/* Called with rtnl_lock */
5616static int
5617bnx2_change_mtu(struct net_device *dev, int new_mtu)
5618{
Michael Chan972ec0d2006-01-23 16:12:43 -08005619 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005620
5621 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5622 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5623 return -EINVAL;
5624
5625 dev->mtu = new_mtu;
5626 if (netif_running(dev)) {
5627 bnx2_netif_stop(bp);
5628
5629 bnx2_init_nic(bp);
5630
5631 bnx2_netif_start(bp);
5632 }
5633 return 0;
5634}
5635
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler directly with the device's
 * IRQ masked, so netconsole/netdump can receive packets even when
 * normal interrupt delivery is unavailable. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5647
5648static int __devinit
5649bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5650{
5651 struct bnx2 *bp;
5652 unsigned long mem_len;
5653 int rc;
5654 u32 reg;
5655
5656 SET_MODULE_OWNER(dev);
5657 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005658 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005659
5660 bp->flags = 0;
5661 bp->phy_flags = 0;
5662
5663 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5664 rc = pci_enable_device(pdev);
5665 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005666 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005667 goto err_out;
5668 }
5669
5670 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005671 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005672 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005673 rc = -ENODEV;
5674 goto err_out_disable;
5675 }
5676
5677 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5678 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005679 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005680 goto err_out_disable;
5681 }
5682
5683 pci_set_master(pdev);
5684
5685 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5686 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005687 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005688 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005689 rc = -EIO;
5690 goto err_out_release;
5691 }
5692
Michael Chanb6016b72005-05-26 13:03:09 -07005693 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5694 bp->flags |= USING_DAC_FLAG;
5695 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005696 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005697 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005698 rc = -EIO;
5699 goto err_out_release;
5700 }
5701 }
5702 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005703 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005704 rc = -EIO;
5705 goto err_out_release;
5706 }
5707
5708 bp->dev = dev;
5709 bp->pdev = pdev;
5710
5711 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005712 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005713
5714 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005715 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005716 dev->mem_end = dev->mem_start + mem_len;
5717 dev->irq = pdev->irq;
5718
5719 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5720
5721 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005722 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005723 rc = -ENOMEM;
5724 goto err_out_release;
5725 }
5726
5727 /* Configure byte swap and enable write to the reg_window registers.
5728 * Rely on CPU to do target byte swapping on big endian systems
5729 * The chip's target access swapping will not swap all accesses
5730 */
5731 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5732 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5733 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5734
Pavel Machek829ca9a2005-09-03 15:56:56 -07005735 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005736
5737 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5738
Michael Chan59b47d82006-11-19 14:10:45 -08005739 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5740 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5741 if (bp->pcix_cap == 0) {
5742 dev_err(&pdev->dev,
5743 "Cannot find PCIX capability, aborting.\n");
5744 rc = -EIO;
5745 goto err_out_unmap;
5746 }
5747 }
5748
Michael Chanb6016b72005-05-26 13:03:09 -07005749 /* Get bus information. */
5750 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5751 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5752 u32 clkreg;
5753
5754 bp->flags |= PCIX_FLAG;
5755
5756 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005757
Michael Chanb6016b72005-05-26 13:03:09 -07005758 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5759 switch (clkreg) {
5760 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5761 bp->bus_speed_mhz = 133;
5762 break;
5763
5764 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5765 bp->bus_speed_mhz = 100;
5766 break;
5767
5768 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5769 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5770 bp->bus_speed_mhz = 66;
5771 break;
5772
5773 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5774 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5775 bp->bus_speed_mhz = 50;
5776 break;
5777
5778 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5779 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5780 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5781 bp->bus_speed_mhz = 33;
5782 break;
5783 }
5784 }
5785 else {
5786 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5787 bp->bus_speed_mhz = 66;
5788 else
5789 bp->bus_speed_mhz = 33;
5790 }
5791
5792 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5793 bp->flags |= PCI_32BIT_FLAG;
5794
5795 /* 5706A0 may falsely detect SERR and PERR. */
5796 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5797 reg = REG_RD(bp, PCI_COMMAND);
5798 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5799 REG_WR(bp, PCI_COMMAND, reg);
5800 }
5801 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5802 !(bp->flags & PCIX_FLAG)) {
5803
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005804 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005805 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005806 goto err_out_unmap;
5807 }
5808
5809 bnx2_init_nvram(bp);
5810
Michael Chane3648b32005-11-04 08:51:21 -08005811 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5812
5813 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5814 BNX2_SHM_HDR_SIGNATURE_SIG)
5815 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5816 else
5817 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5818
Michael Chanb6016b72005-05-26 13:03:09 -07005819 /* Get the permanent MAC address. First we need to make sure the
5820 * firmware is actually running.
5821 */
Michael Chane3648b32005-11-04 08:51:21 -08005822 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005823
5824 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5825 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005826 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005827 rc = -ENODEV;
5828 goto err_out_unmap;
5829 }
5830
Michael Chane3648b32005-11-04 08:51:21 -08005831 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005832
Michael Chane3648b32005-11-04 08:51:21 -08005833 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005834 bp->mac_addr[0] = (u8) (reg >> 8);
5835 bp->mac_addr[1] = (u8) reg;
5836
Michael Chane3648b32005-11-04 08:51:21 -08005837 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005838 bp->mac_addr[2] = (u8) (reg >> 24);
5839 bp->mac_addr[3] = (u8) (reg >> 16);
5840 bp->mac_addr[4] = (u8) (reg >> 8);
5841 bp->mac_addr[5] = (u8) reg;
5842
5843 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005844 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005845
5846 bp->rx_csum = 1;
5847
5848 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5849
5850 bp->tx_quick_cons_trip_int = 20;
5851 bp->tx_quick_cons_trip = 20;
5852 bp->tx_ticks_int = 80;
5853 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005854
Michael Chanb6016b72005-05-26 13:03:09 -07005855 bp->rx_quick_cons_trip_int = 6;
5856 bp->rx_quick_cons_trip = 6;
5857 bp->rx_ticks_int = 18;
5858 bp->rx_ticks = 18;
5859
5860 bp->stats_ticks = 1000000 & 0xffff00;
5861
5862 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005863 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005864
Michael Chan5b0c76a2005-11-04 08:45:49 -08005865 bp->phy_addr = 1;
5866
Michael Chanb6016b72005-05-26 13:03:09 -07005867 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chanbac0dff2006-11-19 14:15:05 -08005868 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5869 if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5870 bp->phy_flags |= PHY_SERDES_FLAG;
5871 } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005872 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005873
5874 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005875 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005876 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005877 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005878 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005879 BNX2_SHARED_HW_CFG_CONFIG);
5880 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5881 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5882 }
Michael Chanb6016b72005-05-26 13:03:09 -07005883 }
5884
Michael Chan16088272006-06-12 22:16:43 -07005885 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5886 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5887 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005888 bp->flags |= NO_WOL_FLAG;
5889
Michael Chanb6016b72005-05-26 13:03:09 -07005890 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5891 bp->tx_quick_cons_trip_int =
5892 bp->tx_quick_cons_trip;
5893 bp->tx_ticks_int = bp->tx_ticks;
5894 bp->rx_quick_cons_trip_int =
5895 bp->rx_quick_cons_trip;
5896 bp->rx_ticks_int = bp->rx_ticks;
5897 bp->comp_prod_trip_int = bp->comp_prod_trip;
5898 bp->com_ticks_int = bp->com_ticks;
5899 bp->cmd_ticks_int = bp->cmd_ticks;
5900 }
5901
Michael Chanf9317a42006-09-29 17:06:23 -07005902 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5903 *
5904 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5905 * with byte enables disabled on the unused 32-bit word. This is legal
5906 * but causes problems on the AMD 8132 which will eventually stop
5907 * responding after a while.
5908 *
5909 * AMD believes this incompatibility is unique to the 5706, and
5910 * prefers to locally disable MSI rather than globally disabling it
5911 * using pci_msi_quirk.
5912 */
5913 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5914 struct pci_dev *amd_8132 = NULL;
5915
5916 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5917 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5918 amd_8132))) {
5919 u8 rev;
5920
5921 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5922 if (rev >= 0x10 && rev <= 0x13) {
5923 disable_msi = 1;
5924 pci_dev_put(amd_8132);
5925 break;
5926 }
5927 }
5928 }
5929
Michael Chanb6016b72005-05-26 13:03:09 -07005930 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5931 bp->req_line_speed = 0;
5932 if (bp->phy_flags & PHY_SERDES_FLAG) {
5933 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005934
Michael Chane3648b32005-11-04 08:51:21 -08005935 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005936 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5937 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5938 bp->autoneg = 0;
5939 bp->req_line_speed = bp->line_speed = SPEED_1000;
5940 bp->req_duplex = DUPLEX_FULL;
5941 }
Michael Chanb6016b72005-05-26 13:03:09 -07005942 }
5943 else {
5944 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5945 }
5946
5947 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5948
Michael Chancd339a02005-08-25 15:35:24 -07005949 init_timer(&bp->timer);
5950 bp->timer.expires = RUN_AT(bp->timer_interval);
5951 bp->timer.data = (unsigned long) bp;
5952 bp->timer.function = bnx2_timer;
5953
Michael Chanb6016b72005-05-26 13:03:09 -07005954 return 0;
5955
5956err_out_unmap:
5957 if (bp->regview) {
5958 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005959 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005960 }
5961
5962err_out_release:
5963 pci_release_regions(pdev);
5964
5965err_out_disable:
5966 pci_disable_device(pdev);
5967 pci_set_drvdata(pdev, NULL);
5968
5969err_out:
5970 return rc;
5971}
5972
5973static int __devinit
5974bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5975{
5976 static int version_printed = 0;
5977 struct net_device *dev = NULL;
5978 struct bnx2 *bp;
5979 int rc, i;
5980
5981 if (version_printed++ == 0)
5982 printk(KERN_INFO "%s", version);
5983
5984 /* dev zeroed in init_etherdev */
5985 dev = alloc_etherdev(sizeof(*bp));
5986
5987 if (!dev)
5988 return -ENOMEM;
5989
5990 rc = bnx2_init_board(pdev, dev);
5991 if (rc < 0) {
5992 free_netdev(dev);
5993 return rc;
5994 }
5995
5996 dev->open = bnx2_open;
5997 dev->hard_start_xmit = bnx2_start_xmit;
5998 dev->stop = bnx2_close;
5999 dev->get_stats = bnx2_get_stats;
6000 dev->set_multicast_list = bnx2_set_rx_mode;
6001 dev->do_ioctl = bnx2_ioctl;
6002 dev->set_mac_address = bnx2_change_mac_addr;
6003 dev->change_mtu = bnx2_change_mtu;
6004 dev->tx_timeout = bnx2_tx_timeout;
6005 dev->watchdog_timeo = TX_TIMEOUT;
6006#ifdef BCM_VLAN
6007 dev->vlan_rx_register = bnx2_vlan_rx_register;
6008 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6009#endif
6010 dev->poll = bnx2_poll;
6011 dev->ethtool_ops = &bnx2_ethtool_ops;
6012 dev->weight = 64;
6013
Michael Chan972ec0d2006-01-23 16:12:43 -08006014 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006015
6016#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6017 dev->poll_controller = poll_bnx2;
6018#endif
6019
6020 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006021 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006022 if (bp->regview)
6023 iounmap(bp->regview);
6024 pci_release_regions(pdev);
6025 pci_disable_device(pdev);
6026 pci_set_drvdata(pdev, NULL);
6027 free_netdev(dev);
6028 return rc;
6029 }
6030
6031 pci_set_drvdata(pdev, dev);
6032
6033 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07006034 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07006035 bp->name = board_info[ent->driver_data].name,
6036 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6037 "IRQ %d, ",
6038 dev->name,
6039 bp->name,
6040 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6041 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6042 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6043 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6044 bp->bus_speed_mhz,
6045 dev->base_addr,
6046 bp->pdev->irq);
6047
6048 printk("node addr ");
6049 for (i = 0; i < 6; i++)
6050 printk("%2.2x", dev->dev_addr[i]);
6051 printk("\n");
6052
6053 dev->features |= NETIF_F_SG;
6054 if (bp->flags & USING_DAC_FLAG)
6055 dev->features |= NETIF_F_HIGHDMA;
6056 dev->features |= NETIF_F_IP_CSUM;
6057#ifdef BCM_VLAN
6058 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6059#endif
6060#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006061 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006062#endif
6063
6064 netif_carrier_off(bp->dev);
6065
6066 return 0;
6067}
6068
6069static void __devexit
6070bnx2_remove_one(struct pci_dev *pdev)
6071{
6072 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006073 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006074
Michael Chanafdc08b2005-08-25 15:34:29 -07006075 flush_scheduled_work();
6076
Michael Chanb6016b72005-05-26 13:03:09 -07006077 unregister_netdev(dev);
6078
6079 if (bp->regview)
6080 iounmap(bp->regview);
6081
6082 free_netdev(dev);
6083 pci_release_regions(pdev);
6084 pci_disable_device(pdev);
6085 pci_set_drvdata(pdev, NULL);
6086}
6087
6088static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006089bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006090{
6091 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006092 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006093 u32 reset_code;
6094
6095 if (!netif_running(dev))
6096 return 0;
6097
Michael Chan1d60290f2006-03-20 17:50:08 -08006098 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006099 bnx2_netif_stop(bp);
6100 netif_device_detach(dev);
6101 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006102 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006103 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006104 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006105 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6106 else
6107 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6108 bnx2_reset_chip(bp, reset_code);
6109 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006110 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006111 return 0;
6112}
6113
6114static int
6115bnx2_resume(struct pci_dev *pdev)
6116{
6117 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006118 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006119
6120 if (!netif_running(dev))
6121 return 0;
6122
Pavel Machek829ca9a2005-09-03 15:56:56 -07006123 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006124 netif_device_attach(dev);
6125 bnx2_init_nic(bp);
6126 bnx2_netif_start(bp);
6127 return 0;
6128}
6129
/* PCI driver glue: per-device probe/remove plus power-management
 * suspend/resume entry points, matched against bnx2_pci_tbl IDs.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6138
6139static int __init bnx2_init(void)
6140{
Jeff Garzik29917622006-08-19 17:48:59 -04006141 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006142}
6143
/* Module unload hook: unregister the PCI driver from the PCI core. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6148
/* Bind the load/unload hooks above to module insertion and removal. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6151
6152
6153