blob: cffdec3d5b5a3bc909b4eec25c358d38bb65afb0 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chan68c9f752007-04-24 15:35:53 -070057#define DRV_MODULE_VERSION "1.5.8"
58#define DRV_MODULE_RELDATE "April 24, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080084 BCM5708,
85 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080086 BCM5709,
Michael Chanb6016b72005-05-26 13:03:09 -070087} board_t;
88
89/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050090static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070091 char *name;
92} board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080098 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800100 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chanb6016b72005-05-26 13:03:09 -0700101 };
102
103static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chanb6016b72005-05-26 13:03:09 -0700120 { 0, }
121};
122
123static struct flash_spec flash_table[] =
124{
125 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800126 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700127 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
128 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800130 /* Expansion entry 0001 */
131 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700132 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800133 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700135 /* Saifun SA25F010 (non-buffered flash) */
136 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800137 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700138 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
139 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
140 "Non-buffered flash (128kB)"},
141 /* Saifun SA25F020 (non-buffered flash) */
142 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800143 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700144 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
146 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800147 /* Expansion entry 0100 */
148 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
149 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 "Entry 0100"},
152 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400153 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800154 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
155 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
156 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
157 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
158 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
159 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
161 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
162 /* Saifun SA25F005 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
164 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
165 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
167 "Non-buffered flash (64kB)"},
168 /* Fast EEPROM */
169 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
170 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 "EEPROM - fast"},
173 /* Expansion entry 1001 */
174 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
175 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 "Entry 1001"},
178 /* Expansion entry 1010 */
179 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
180 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 "Entry 1010"},
183 /* ATMEL AT45DB011B (buffered flash) */
184 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
185 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
186 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
187 "Buffered flash (128kB)"},
188 /* Expansion entry 1100 */
189 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
190 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192 "Entry 1100"},
193 /* Expansion entry 1101 */
194 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
195 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 "Entry 1101"},
198 /* Ateml Expansion entry 1110 */
199 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
200 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
201 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
202 "Entry 1110 (Atmel)"},
203 /* ATMEL AT45DB021B (buffered flash) */
204 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
205 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
207 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700208};
209
210MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
/* Return the number of free TX ring descriptors.
 *
 * The smp_mb() orders the reads of tx_prod/tx_cons against prior ring
 * updates — presumably pairing with a barrier in the TX completion
 * path; NOTE(review): confirm against the tx_int/start_xmit code.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = bp->tx_prod - bp->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                /* Indices are 16-bit counters; reduce the wrapped
                 * difference back into the valid range.
                 */
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}
229
/* Indirectly read a chip register: the target offset is latched into
 * the PCICFG window address register, then the data is read back
 * through the window.  NOTE(review): no locking here — presumably the
 * caller serializes access to the shared window; confirm at call sites.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
236
/* Indirectly write 'val' to a chip register through the PCICFG
 * address/data window (counterpart of bnx2_reg_rd_ind()).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
243
244static void
245bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246{
247 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800248 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249 int i;
250
251 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254 for (i = 0; i < 5; i++) {
255 u32 val;
256 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258 break;
259 udelay(5);
260 }
261 } else {
262 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263 REG_WR(bp, BNX2_CTX_DATA, val);
264 }
Michael Chanb6016b72005-05-26 13:03:09 -0700265}
266
/* Read PHY register 'reg' over the MDIO bus into *val.
 *
 * If the EMAC is auto-polling the PHY, auto-poll is turned off for the
 * duration of the transaction and restored afterwards; the 40us delays
 * let the MDIO logic settle after each mode change.  Completion is
 * polled for up to 50 x 10us.
 *
 * Returns 0 on success, or -EBUSY on timeout (*val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily disable hardware auto-polling so our
                 * command does not race with the poller.
                 */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO read command: PHY address, register, and
         * the start/busy bit that kicks off the transaction.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
323
/* Write 'val' to PHY register 'reg' over the MDIO bus.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * transaction, completion is polled for up to 50 x 10us, and
 * auto-polling is restored before returning.
 *
 * Returns 0 on success, or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily disable hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO write command: PHY address, register,
         * 16-bit data, and the start/busy bit.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
372
/* Mask chip interrupts.  The read-back of INT_ACK_CMD flushes the
 * posted write so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask chip interrupts.
 *
 * The first write acks the last seen status index while still masked;
 * the second clears the mask.  The final HC_COMMAND write requests an
 * immediate coalescing pass so any pending events generate a new
 * interrupt right away.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Disable interrupts and wait for any in-flight interrupt handler to
 * finish.  intr_sem is raised first so the ISR sees the disable in
 * progress and backs off.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
401
/* Quiesce the interface: disable interrupts (synchronously), then stop
 * NAPI polling and the TX queue.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
412
/* Re-enable the interface after bnx2_netif_stop().  Only the caller
 * that drops intr_sem to zero actually restarts the queue, polling,
 * and interrupts — balancing nested stop/start pairs.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
424
/* Release all DMA-coherent rings/blocks and host-side shadow rings
 * allocated by bnx2_alloc_mem().  Safe to call on a partially
 * allocated state (every pointer is checked and NULLed), which is why
 * the alloc path can use it as its single error-cleanup routine.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709 context memory pages. */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status block; the statistics block lives in the same
         * allocation, so only its pointer is cleared here.
         */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        /* rx_buf_ring is vmalloc'ed (see bnx2_alloc_mem). */
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
463
/* Allocate all host memory used by the driver: TX/RX shadow rings,
 * TX/RX descriptor rings (DMA-coherent), the combined status +
 * statistics block, and (5709 only) context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* The RX shadow ring can span multiple pages, hence vmalloc
         * (needs an explicit memset; there is no vzalloc here).
         */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* Statistics block starts at the cache-aligned offset past the
         * status block, in both CPU and DMA address spaces.
         */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB of context memory, in page-sized chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
534
/* Encode the current link state into BNX2_LINK_STATUS_* bits and post
 * it to the bootcode through shared memory at shmem_base.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice — presumably because its
                         * status bits are latched; TODO confirm.
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
590
591static void
Michael Chanb6016b72005-05-26 13:03:09 -0700592bnx2_report_link(struct bnx2 *bp)
593{
594 if (bp->link_up) {
595 netif_carrier_on(bp->dev);
596 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
597
598 printk("%d Mbps ", bp->line_speed);
599
600 if (bp->duplex == DUPLEX_FULL)
601 printk("full duplex");
602 else
603 printk("half duplex");
604
605 if (bp->flow_ctrl) {
606 if (bp->flow_ctrl & FLOW_CTRL_RX) {
607 printk(", receive ");
608 if (bp->flow_ctrl & FLOW_CTRL_TX)
609 printk("& transmit ");
610 }
611 else {
612 printk(", transmit ");
613 }
614 printk("flow control ON");
615 }
616 printk("\n");
617 }
618 else {
619 netif_carrier_off(bp->dev);
620 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
621 }
Michael Chane3648b32005-11-04 08:51:21 -0800622
623 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700624}
625
/* Resolve the effective flow control (bp->flow_ctrl) from either the
 * forced configuration or the autonegotiation results.
 *
 * Pause is only honored in full duplex.  On the 5708 SerDes PHY the
 * hardware reports the resolved pause state directly; otherwise the
 * local and remote pause advertisements are combined per the 802.3
 * resolution table.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Not autonegotiating both speed and flow control: use the
         * requested (forced) setting, full duplex only.
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes: the PHY reports the resolved TX/RX pause
         * state in its 1000X status register.
         */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        /* Map the 1000BASE-X pause bits onto the copper-style
         * PAUSE_CAP/PAUSE_ASYM bits so one resolution table works
         * for both media.
         */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
701
702static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800703bnx2_5708s_linkup(struct bnx2 *bp)
704{
705 u32 val;
706
707 bp->link_up = 1;
708 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710 case BCM5708S_1000X_STAT1_SPEED_10:
711 bp->line_speed = SPEED_10;
712 break;
713 case BCM5708S_1000X_STAT1_SPEED_100:
714 bp->line_speed = SPEED_100;
715 break;
716 case BCM5708S_1000X_STAT1_SPEED_1G:
717 bp->line_speed = SPEED_1000;
718 break;
719 case BCM5708S_1000X_STAT1_SPEED_2G5:
720 bp->line_speed = SPEED_2500;
721 break;
722 }
723 if (val & BCM5708S_1000X_STAT1_FD)
724 bp->duplex = DUPLEX_FULL;
725 else
726 bp->duplex = DUPLEX_HALF;
727
728 return 0;
729}
730
731static int
732bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700733{
734 u32 bmcr, local_adv, remote_adv, common;
735
736 bp->link_up = 1;
737 bp->line_speed = SPEED_1000;
738
739 bnx2_read_phy(bp, MII_BMCR, &bmcr);
740 if (bmcr & BMCR_FULLDPLX) {
741 bp->duplex = DUPLEX_FULL;
742 }
743 else {
744 bp->duplex = DUPLEX_HALF;
745 }
746
747 if (!(bmcr & BMCR_ANENABLE)) {
748 return 0;
749 }
750
751 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752 bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754 common = local_adv & remote_adv;
755 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757 if (common & ADVERTISE_1000XFULL) {
758 bp->duplex = DUPLEX_FULL;
759 }
760 else {
761 bp->duplex = DUPLEX_HALF;
762 }
763 }
764
765 return 0;
766}
767
/* Link-up handler for copper PHYs: determine line speed and duplex.
 *
 * When autonegotiating, 1000Mb ability is resolved first (CTRL1000 vs
 * STAT1000), then 100/10 from ADVERTISE vs LPA; if no common ability
 * is found the link is declared down.  When forced, BMCR is decoded
 * directly.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* STAT1000's partner-ability bits sit two positions
                 * above the corresponding CTRL1000 advertisement
                 * bits; shift to line them up before ANDing.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* No common ability: no usable link. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Forced mode: decode speed/duplex from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
833
/* Program the EMAC to match the resolved link parameters: port mode
 * (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* TX IPG/slot-time lengths; the larger value is needed for
         * 1000Mb half duplex.  NOTE(review): 0x2620/0x26ff are
         * hardware-specific magic — confirm against chip docs.
         */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only non-5706 chips have a distinct
                                 * 10Mb MII port mode.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
900
/* Central link-state update: read the PHY, dispatch to the proper
 * per-PHY link-up handler, resolve flow control, report transitions,
 * and reprogram the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is considered up unconditionally. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        /* BMSR is read twice — presumably because its link status bit
         * is latched; TODO confirm.
         */
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);

        /* On 5706 SerDes, trust the EMAC's own link indication over
         * the PHY's BMSR.
         */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: if autonegotiating on SerDes, make sure
                 * forced 2.5G is cleared and autoneg is re-enabled.
                 */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED)) {

                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
                        }
                }
                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Only log/report when the state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
967
968static int
969bnx2_reset_phy(struct bnx2 *bp)
970{
971 int i;
972 u32 reg;
973
974 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
975
976#define PHY_RESET_MAX_WAIT 100
977 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978 udelay(10);
979
980 bnx2_read_phy(bp, MII_BMCR, &reg);
981 if (!(reg & BMCR_RESET)) {
982 udelay(20);
983 break;
984 }
985 }
986 if (i == PHY_RESET_MAX_WAIT) {
987 return -EBUSY;
988 }
989 return 0;
990}
991
992static u32
993bnx2_phy_get_pause_adv(struct bnx2 *bp)
994{
995 u32 adv = 0;
996
997 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPAUSE;
1002 }
1003 else {
1004 adv = ADVERTISE_PAUSE_CAP;
1005 }
1006 }
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPSE_ASYM;
1010 }
1011 else {
1012 adv = ADVERTISE_PAUSE_ASYM;
1013 }
1014 }
1015 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018 }
1019 else {
1020 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021 }
1022 }
1023 return adv;
1024}
1025
/* Configure the SerDes PHY according to the settings in *bp.
 *
 * Two paths:
 *  - Forced speed (autoneg speed disabled): program BMCR/advertisement
 *    directly, handling the 5708's 2.5G force bit, and bounce the link
 *    if anything changed so the partner sees the new settings.
 *  - Autoneg: build the advertisement word and restart autoneg if it
 *    (or the autoneg-enable bit) changed.
 *
 * Caller must hold bp->phy_lock (it is dropped/reacquired around the
 * msleep below).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Force 2.5G; make sure the 2.5G capability bit is
			 * set, and bounce the link if we had to change it.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G on a 5708: clear the 2.5G
			 * capability bit if it was set.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Advertise 2.5G capability when the hardware supports it. */
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping; caller holds it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
1130#define ETHTOOL_ALL_FIBRE_SPEED \
1131 (ADVERTISED_1000baseT_Full)
1132
1133#define ETHTOOL_ALL_COPPER_SPEED \
1134 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1135 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1136 ADVERTISED_1000baseT_Full)
1137
1138#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1139 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001140
Michael Chanb6016b72005-05-26 13:03:09 -07001141#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Configure the copper PHY according to the settings in *bp.
 *
 * Autoneg path: rebuild the 10/100 and 1000 advertisement registers from
 * bp->advertising and restart autoneg only if something changed.
 * Forced path: program BMCR with the requested speed/duplex, bouncing the
 * link first if it is currently up so the partner renegotiates.
 *
 * Caller must hold bp->phy_lock (dropped/reacquired around the msleep).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we manage when comparing
		 * against the newly computed advertisement.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement actually
		 * changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice: the link-status bit is latched-low, so
		 * the second read reflects the current link state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping; caller holds it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237static int
1238bnx2_setup_phy(struct bnx2 *bp)
1239{
1240 if (bp->loopback == MAC_LOOPBACK)
1241 return 0;
1242
1243 if (bp->phy_flags & PHY_SERDES_FLAG) {
1244 return (bnx2_setup_serdes_phy(bp));
1245 }
1246 else {
1247 return (bnx2_setup_copper_phy(bp));
1248 }
1249}
1250
/* One-time initialization of the 5708 SerDes PHY.
 *
 * The 5708S PHY exposes several register banks selected through
 * BCM5708S_BLK_ADDR; each section below switches to the bank it needs
 * and restores the default DIG bank afterwards.  Applies chip-rev and
 * backplane-specific TX amplitude tuning from NVRAM shared config.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use IEEE-standard register layout (DIG3 bank). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Enable fiber mode and auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Advertise 2.5G capability. */
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply board-specific TX control value from shared memory, but
	 * only for backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Programs vendor-specific PHY registers (raw addresses 0x18/0x1c) to
 * enable or disable extended packet length handling depending on MTU.
 * The exact bit meanings are Broadcom-internal; the values mirror the
 * vendor-recommended sequences.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time initialization of the copper PHY.
 *
 * Applies vendor-specific workarounds (CRC fix, early-DAC disable) when
 * flagged, sets/clears extended packet length for jumbo MTU, and enables
 * ethernet@wirespeed.  Register addresses 0x10/0x15/0x17/0x18 are
 * Broadcom shadow/expansion registers; the magic values follow the
 * vendor-recommended sequences.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor workaround sequence for a CRC erratum. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear the early-DAC enable bit in the DSP expansion reg. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Reset and initialize the PHY, then apply the current configuration.
 *
 * Enables link attention, resets the PHY, reads the PHY ID into
 * bp->phy_id, dispatches to the chip-specific SerDes or copper init
 * routine, and finally calls bnx2_setup_phy().
 *
 * Returns the result of the chip-specific init routine (0 on success);
 * note the return value of bnx2_setup_phy() itself is not propagated.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Interrupt on link-ready changes. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1422
1423static int
1424bnx2_set_mac_loopback(struct bnx2 *bp)
1425{
1426 u32 mac_mode;
1427
1428 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432 bp->link_up = 1;
1433 return 0;
1434}
1435
Michael Chanbc5a0692006-01-23 16:13:22 -08001436static int bnx2_test_link(struct bnx2 *);
1437
/* Put the PHY into loopback (1000 Mbps full duplex) and configure the
 * EMAC to match (GMII port mode, no MAC loopback / forced link).
 *
 * Waits up to ~1 second for the PHY link to come up before programming
 * the MAC.  Returns 0 on success or the bnx2_write_phy() error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY access requires phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; best effort, proceeds even on timeout. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear modes that would interfere with PHY loopback. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1467
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for the acknowledgement.
 *
 * @msg_data: message code/data (a sequence number is OR'ed in here).
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success (or when no wait was requested), -EBUSY on ack
 * timeout, -EIO when the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ack can be
	 * matched against this particular request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Callers using WAIT0 don't care about the outcome. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1510
/* Initialize the 5709 context memory by loading the host page table.
 *
 * Writes each context-block DMA address into the host page table, one
 * entry at a time, polling for the WRITE_REQ bit to self-clear after
 * each entry.  Returns 0 on success or -EBUSY if an entry write does
 * not complete within the polling budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2, relative to 256 bytes). */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits + valid flag, then the high 32 bits. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the entry. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1544
/* Zero out the on-chip context memory for all 96 connection IDs.
 *
 * On 5706 A0 silicon the CID-to-physical-CID mapping is irregular for
 * some IDs, so the physical address is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 erratum: CIDs with bit 3 set map to a
			 * different physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context page at virtual address 0 for writing. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* Restore the mapping at the CID's own virtual address. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1585
/* Work around bad on-chip RX mbuf memory blocks.
 *
 * Drains the free mbuf pool, recording the good buffers (bit 9 clear)
 * and implicitly discarding the bad ones, then returns only the good
 * buffers to the pool.  Returns 0 on success or -ENOMEM if the
 * temporary tracking array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		/* Encode the buffer handle in the format the free
		 * register expects.
		 */
		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1636
1637static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001638bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001639{
1640 u32 val;
1641 u8 *mac_addr = bp->dev->dev_addr;
1642
1643 val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001647 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001648 (mac_addr[4] << 8) | mac_addr[5];
1649
1650 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651}
1652
/* Allocate and DMA-map a new RX skb and install it in the RX ring at
 * @index, updating the producer byte-sequence counter.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to BNX2_RX_ALIGN if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Record the skb and its DMA handle in the software ring. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1683
/* Handle a link-state attention from the status block.
 *
 * Compares the current attention bit against the acknowledged copy;
 * on a change, acknowledges it through the PCICFG set/clear command
 * registers and re-evaluates the link via bnx2_set_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		/* Ack the attention so the ack bits match again. */
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1705
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer index to the hardware
 * consumer index published in the status block, unmapping and freeing
 * each completed skb, then wakes the TX queue if it was stopped and
 * enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware index skips the last entry of each ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim a TSO packet once ALL of its BDs
			 * have completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's page. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more completions may have
		 * arrived while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue under the tx lock, re-checking the conditions
	 * to close the race with bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1793
/* Recycle an RX skb: move it (and its DMA mapping/descriptor address)
 * from the consumer slot @cons back onto the ring at producer slot
 * @prod, without allocating a new buffer.  Used when a packet is
 * dropped or has been copied out.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (header-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the DMA address into the producer descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1823
/* Process received packets, up to @budget of them (NAPI poll helper).
 *
 * Walks the RX ring from the software consumer index to the hardware
 * index in the status block.  Error frames are recycled; small frames
 * (when jumbo MTU is in use) are copied into a fresh skb so the large
 * buffer can be reused; otherwise the buffer is handed up the stack
 * and replaced.  Finally the new producer index/byte sequence is
 * written to the hardware mailbox.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The hardware index skips the last entry of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the full
		 * buffer is unmapped later if we keep the skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr status header; the
		 * packet length includes the 4-byte FCS, which we strip.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: hand this skb upstream. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report a valid hardware checksum only when the frame is
		 * TCP/UDP and the hardware flagged no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1971
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache with the status block before the NAPI poll runs. */
	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further interrupts; they are
	 * re-enabled from bnx2_poll() when all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled (intr_sem held during reset). */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Hand the rest of the work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1994
/* INTx (legacy) interrupt handler; may be called on a shared line. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further interrupts until the
	 * NAPI poll routine re-enables them.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2024
Michael Chanf4e418f2005-11-04 08:53:48 -08002025static inline int
2026bnx2_has_work(struct bnx2 *bp)
2027{
2028 struct status_block *sblk = bp->status_blk;
2029
2030 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2031 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2032 return 1;
2033
Michael Chandb8b2252007-03-28 14:17:36 -07002034 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2035 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
Michael Chanf4e418f2005-11-04 08:53:48 -08002036 return 1;
2037
2038 return 0;
2039}
2040
/* NAPI poll routine: service link attentions, TX and RX completions,
 * then re-enable interrupts once no work remains.  Returns 0 when done
 * (removed from the poll list) or 1 to stay scheduled.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The attention bit and its ack bit disagree while a link-state
	 * change is pending; service it under the PHY lock.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.  The read-back flushes the
		 * posted write.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	/* Reap TX completions if the hardware consumer index moved. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	/* Process RX completions within the smaller of budget and quota. */
	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index being acknowledged before re-checking
	 * for work; rmb() orders that read against bnx2_has_work().
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack/unmask write suffices. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with interrupts still masked, then a
		 * second write unmasks them.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* Work remains; keep this device on the poll list. */
	return 1;
}
2102
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.  Programs the EMAC RX mode (promiscuous, VLAN tag
 * keep) and the multicast hash filter from dev->flags and dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous/keep-VLAN cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in RX frames only when no VLAN group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one bit of the 256-bit
			 * filter: low CRC byte selects register and bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort user0 register: clear, set, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2177
Michael Chanfba9fe92006-06-12 22:21:25 -07002178#define FW_BUF_SIZE 0x8000
2179
2180static int
2181bnx2_gunzip_init(struct bnx2 *bp)
2182{
2183 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2184 goto gunzip_nomem1;
2185
2186 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2187 goto gunzip_nomem2;
2188
2189 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2190 if (bp->strm->workspace == NULL)
2191 goto gunzip_nomem3;
2192
2193 return 0;
2194
2195gunzip_nomem3:
2196 kfree(bp->strm);
2197 bp->strm = NULL;
2198
2199gunzip_nomem2:
2200 vfree(bp->gunzip_buf);
2201 bp->gunzip_buf = NULL;
2202
2203gunzip_nomem1:
2204 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2205 "uncompression.\n", bp->dev->name);
2206 return -ENOMEM;
2207}
2208
2209static void
2210bnx2_gunzip_end(struct bnx2 *bp)
2211{
2212 kfree(bp->strm->workspace);
2213
2214 kfree(bp->strm);
2215 bp->strm = NULL;
2216
2217 if (bp->gunzip_buf) {
2218 vfree(bp->gunzip_buf);
2219 bp->gunzip_buf = NULL;
2220 }
2221}
2222
/* Inflate a gzip-wrapped firmware image of @len bytes at @zbuf into the
 * shared buffer bp->gunzip_buf.  On return *outbuf points at that buffer
 * (valid until the next call) and *outlen is the byte count produced.
 * Returns 0 on success, -EINVAL for a bad gzip header, or a zlib error.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header: magic bytes 0x1f 0x8b and deflate method */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* The fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the NUL-terminated original file name when present.
	 * NOTE(review): the other optional header fields (FEXTRA,
	 * FCOMMENT, FHCRC) are not handled -- presumably the in-tree
	 * firmware images never set them; confirm if images change.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate data, no zlib header. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2263
/* Load an RV2P engine firmware image one 64-bit instruction (two 32-bit
 * words) at a time, then hold the selected processor in reset; it is
 * un-stalled later.  @rv2p_code_len is in bytes; @rv2p_proc selects
 * RV2P_PROC1 or RV2P_PROC2.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* Stage the high and low instruction words... */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* ...then commit them at instruction index i / 8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2296
Michael Chanaf3ee512006-11-19 14:09:25 -08002297static int
Michael Chanb6016b72005-05-26 13:03:09 -07002298load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2299{
2300 u32 offset;
2301 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002302 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002303
2304 /* Halt the CPU. */
2305 val = REG_RD_IND(bp, cpu_reg->mode);
2306 val |= cpu_reg->mode_value_halt;
2307 REG_WR_IND(bp, cpu_reg->mode, val);
2308 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2309
2310 /* Load the Text area. */
2311 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002312 if (fw->gz_text) {
2313 u32 text_len;
2314 void *text;
2315
2316 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2317 &text_len);
2318 if (rc)
2319 return rc;
2320
2321 fw->text = text;
2322 }
2323 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002324 int j;
2325
2326 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002327 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002328 }
2329 }
2330
2331 /* Load the Data area. */
2332 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2333 if (fw->data) {
2334 int j;
2335
2336 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2337 REG_WR_IND(bp, offset, fw->data[j]);
2338 }
2339 }
2340
2341 /* Load the SBSS area. */
2342 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2343 if (fw->sbss) {
2344 int j;
2345
2346 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2347 REG_WR_IND(bp, offset, fw->sbss[j]);
2348 }
2349 }
2350
2351 /* Load the BSS area. */
2352 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2353 if (fw->bss) {
2354 int j;
2355
2356 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2357 REG_WR_IND(bp, offset, fw->bss[j]);
2358 }
2359 }
2360
2361 /* Load the Read-Only area. */
2362 offset = cpu_reg->spad_base +
2363 (fw->rodata_addr - cpu_reg->mips_view_base);
2364 if (fw->rodata) {
2365 int j;
2366
2367 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2368 REG_WR_IND(bp, offset, fw->rodata[j]);
2369 }
2370 }
2371
2372 /* Clear the pre-fetch instruction. */
2373 REG_WR_IND(bp, cpu_reg->inst, 0);
2374 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2375
2376 /* Start the CPU. */
2377 val = REG_RD_IND(bp, cpu_reg->mode);
2378 val &= ~cpu_reg->mode_value_halt;
2379 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2380 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002381
2382 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002383}
2384
/* Load and start all on-chip firmware: the two RV2P engines plus the
 * RXP, TXP, TPAT, COM and (5709 only) CP RISC processors.
 * Returns 0 on success or a decompression/allocation error code; the
 * shared gunzip state is released on all paths.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the shared buffer used to decompress each image. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709 parts run a different firmware set than 5706/5708. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (only present on 5709). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2529
/* Transition the device between PCI power states.  Only PCI_D0 (wake
 * the chip and clear WOL modes) and PCI_D3hot (optionally arm wake-on-
 * LAN, notify the firmware, then drop power) are supported; any other
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any pending magic/ACPI packet events and disable
		 * magic-packet mode used for WOL.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Force a 10/100 copper link for WOL, preserving
			 * the user's autoneg/advertising settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast: clear, set,
			 * then enable.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Program the D3hot state (value 3) in PMCSR; 5706 A0/A1
		 * only enter D3hot when WOL is armed.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2656
2657static int
2658bnx2_acquire_nvram_lock(struct bnx2 *bp)
2659{
2660 u32 val;
2661 int j;
2662
2663 /* Request access to the flash interface. */
2664 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2665 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2666 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2667 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2668 break;
2669
2670 udelay(5);
2671 }
2672
2673 if (j >= NVRAM_TIMEOUT_COUNT)
2674 return -EBUSY;
2675
2676 return 0;
2677}
2678
2679static int
2680bnx2_release_nvram_lock(struct bnx2 *bp)
2681{
2682 int j;
2683 u32 val;
2684
2685 /* Relinquish nvram interface. */
2686 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2687
2688 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2689 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2690 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2691 break;
2692
2693 udelay(5);
2694 }
2695
2696 if (j >= NVRAM_TIMEOUT_COUNT)
2697 return -EBUSY;
2698
2699 return 0;
2700}
2701
2702
2703static int
2704bnx2_enable_nvram_write(struct bnx2 *bp)
2705{
2706 u32 val;
2707
2708 val = REG_RD(bp, BNX2_MISC_CFG);
2709 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2710
2711 if (!bp->flash_info->buffered) {
2712 int j;
2713
2714 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2715 REG_WR(bp, BNX2_NVM_COMMAND,
2716 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2717
2718 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2719 udelay(5);
2720
2721 val = REG_RD(bp, BNX2_NVM_COMMAND);
2722 if (val & BNX2_NVM_COMMAND_DONE)
2723 break;
2724 }
2725
2726 if (j >= NVRAM_TIMEOUT_COUNT)
2727 return -EBUSY;
2728 }
2729 return 0;
2730}
2731
2732static void
2733bnx2_disable_nvram_write(struct bnx2 *bp)
2734{
2735 u32 val;
2736
2737 val = REG_RD(bp, BNX2_MISC_CFG);
2738 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2739}
2740
2741
2742static void
2743bnx2_enable_nvram_access(struct bnx2 *bp)
2744{
2745 u32 val;
2746
2747 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2748 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002749 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002750 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2751}
2752
2753static void
2754bnx2_disable_nvram_access(struct bnx2 *bp)
2755{
2756 u32 val;
2757
2758 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2759 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002760 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002761 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2762 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2763}
2764
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts.  Returns 0 on success, -EBUSY if the command never completes.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2804
/* Read one 32-bit word from NVRAM at @offset into @ret_val (bytes are
 * stored in big-endian NVRAM order).  @cmd_flags carries framing bits
 * such as FIRST/LAST.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash: page number goes in
	 * the upper bits, byte offset within the page in the lower bits.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, then fetch the data word. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Keep the bytes in NVRAM (big-endian) order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2850
2851
/* Write one 32-bit word from @val to NVRAM at @offset.  @cmd_flags
 * carries framing bits such as FIRST/LAST.  Returns 0 on success,
 * -EBUSY if the command never completes.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash: page number goes in
	 * the upper bits, byte offset within the page in the lower bits.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Data is stored in NVRAM in big-endian byte order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2895
/* Identify the attached NVRAM/flash device and record it in bp->flash_info.
 *
 * The device is matched against flash_table[] using either the already-
 * reconfigured interface settings (NVM_CFG1 bit 30 set) or the raw strap
 * pins.  In the strap case the flash interface is also reprogrammed with
 * the matched entry's CFG1-CFG3/WRITE1 values.  The usable flash size is
 * taken from shared-memory config if present, otherwise from the table.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or a negative
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count when nothing
	 * matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared memory; fall back to
	 * the table's total_size when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2973
2974static int
2975bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2976 int buf_size)
2977{
2978 int rc = 0;
2979 u32 cmd_flags, offset32, len32, extra;
2980
2981 if (buf_size == 0)
2982 return 0;
2983
2984 /* Request access to the flash interface. */
2985 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2986 return rc;
2987
2988 /* Enable access to flash interface */
2989 bnx2_enable_nvram_access(bp);
2990
2991 len32 = buf_size;
2992 offset32 = offset;
2993 extra = 0;
2994
2995 cmd_flags = 0;
2996
2997 if (offset32 & 3) {
2998 u8 buf[4];
2999 u32 pre_len;
3000
3001 offset32 &= ~3;
3002 pre_len = 4 - (offset & 3);
3003
3004 if (pre_len >= len32) {
3005 pre_len = len32;
3006 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3007 BNX2_NVM_COMMAND_LAST;
3008 }
3009 else {
3010 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3011 }
3012
3013 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3014
3015 if (rc)
3016 return rc;
3017
3018 memcpy(ret_buf, buf + (offset & 3), pre_len);
3019
3020 offset32 += 4;
3021 ret_buf += pre_len;
3022 len32 -= pre_len;
3023 }
3024 if (len32 & 3) {
3025 extra = 4 - (len32 & 3);
3026 len32 = (len32 + 4) & ~3;
3027 }
3028
3029 if (len32 == 4) {
3030 u8 buf[4];
3031
3032 if (cmd_flags)
3033 cmd_flags = BNX2_NVM_COMMAND_LAST;
3034 else
3035 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3036 BNX2_NVM_COMMAND_LAST;
3037
3038 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3039
3040 memcpy(ret_buf, buf, 4 - extra);
3041 }
3042 else if (len32 > 0) {
3043 u8 buf[4];
3044
3045 /* Read the first word. */
3046 if (cmd_flags)
3047 cmd_flags = 0;
3048 else
3049 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3050
3051 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3052
3053 /* Advance to the next dword. */
3054 offset32 += 4;
3055 ret_buf += 4;
3056 len32 -= 4;
3057
3058 while (len32 > 4 && rc == 0) {
3059 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3060
3061 /* Advance to the next dword. */
3062 offset32 += 4;
3063 ret_buf += 4;
3064 len32 -= 4;
3065 }
3066
3067 if (rc)
3068 return rc;
3069
3070 cmd_flags = BNX2_NVM_COMMAND_LAST;
3071 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3072
3073 memcpy(ret_buf, buf, 4 - extra);
3074 }
3075
3076 /* Disable access to flash interface */
3077 bnx2_disable_nvram_access(bp);
3078
3079 bnx2_release_nvram_lock(bp);
3080
3081 return rc;
3082}
3083
/* Write an arbitrary byte range to NVRAM.
 *
 * Unaligned head/tail bytes are merged with the existing flash contents
 * (read into start[]/end[], combined into align_buf) so the device only
 * ever sees dword-aligned, dword-sized transfers.  The write then proceeds
 * one flash page at a time; for non-buffered parts each page must be read
 * out in full, erased, and rewritten, with the untouched regions restored
 * from flash_buffer.  The NVRAM lock is acquired and released per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error from the
 * lock/read/write/erase helpers).  All failure paths after allocation go
 * through nvram_write_end, which frees both temporary buffers.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range to the enclosing dword
		 * and fetch the bytes that must be preserved. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: round up and fetch the trailing dword. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build an aligned image: preserved head + caller data +
		 * preserved tail. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch copy of each page (264 bytes
	 * covers the largest supported page). */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of
			 * the data region for buffered flash. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3263
3264static int
3265bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3266{
3267 u32 val;
3268 int i, rc = 0;
3269
3270 /* Wait for the current PCI transaction to complete before
3271 * issuing a reset. */
3272 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3273 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3274 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3277 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3278 udelay(5);
3279
Michael Chanb090ae22006-01-23 16:07:10 -08003280 /* Wait for the firmware to tell us it is ok to issue a reset. */
3281 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3282
Michael Chanb6016b72005-05-26 13:03:09 -07003283 /* Deposit a driver reset signature so the firmware knows that
3284 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003285 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003286 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3287
Michael Chanb6016b72005-05-26 13:03:09 -07003288 /* Do a dummy read to force the chip to complete all current transaction
3289 * before we issue a reset. */
3290 val = REG_RD(bp, BNX2_MISC_ID);
3291
Michael Chan234754d2006-11-19 14:11:41 -08003292 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3293 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3294 REG_RD(bp, BNX2_MISC_COMMAND);
3295 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003296
Michael Chan234754d2006-11-19 14:11:41 -08003297 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3298 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003299
Michael Chan234754d2006-11-19 14:11:41 -08003300 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003301
Michael Chan234754d2006-11-19 14:11:41 -08003302 } else {
3303 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3304 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3305 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3306
3307 /* Chip reset. */
3308 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3309
3310 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3311 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3312 current->state = TASK_UNINTERRUPTIBLE;
3313 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003314 }
Michael Chanb6016b72005-05-26 13:03:09 -07003315
Michael Chan234754d2006-11-19 14:11:41 -08003316 /* Reset takes approximate 30 usec */
3317 for (i = 0; i < 10; i++) {
3318 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3319 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3320 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3321 break;
3322 udelay(10);
3323 }
3324
3325 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3326 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3327 printk(KERN_ERR PFX "Chip reset did not complete\n");
3328 return -EBUSY;
3329 }
Michael Chanb6016b72005-05-26 13:03:09 -07003330 }
3331
3332 /* Make sure byte swapping is properly configured. */
3333 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3334 if (val != 0x01020304) {
3335 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3336 return -ENODEV;
3337 }
3338
Michael Chanb6016b72005-05-26 13:03:09 -07003339 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003340 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3341 if (rc)
3342 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003343
3344 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3345 /* Adjust the voltage regular to two steps lower. The default
3346 * of this register is 0x0000000e. */
3347 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3348
3349 /* Remove bad rbuf memory from the free pool. */
3350 rc = bnx2_alloc_bad_rbuf(bp);
3351 }
3352
3353 return rc;
3354}
3355
/* Bring the chip from post-reset state to fully configured.
 *
 * Programs DMA byte-swapping and channel counts, the context memory,
 * loads the on-chip CPUs' firmware (bnx2_init_cpus), the MAC address,
 * kernel-bypass mailbox windows, MTU, host-coalescing parameters, and the
 * status/statistics block DMA addresses, then tells the bootcode that
 * reset handling is complete via bnx2_fw_sync().
 *
 * Returns 0 on success or a negative error from bnx2_init_cpus() /
 * bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and channel configuration. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the RV2P/RXP/TXP/TPAT/COM/CP processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and tick timers; high 16 bits are
	 * the during-interrupt values. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the bootcode reports ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the bootcode reset processing is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3523
Michael Chan59b47d82006-11-19 14:10:45 -08003524static void
3525bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3526{
3527 u32 val, offset0, offset1, offset2, offset3;
3528
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 offset0 = BNX2_L2CTX_TYPE_XI;
3531 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3534 } else {
3535 offset0 = BNX2_L2CTX_TYPE;
3536 offset1 = BNX2_L2CTX_CMD_TYPE;
3537 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3539 }
3540 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3542
3543 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3545
3546 val = (u64) bp->tx_desc_mapping >> 32;
3547 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3548
3549 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551}
Michael Chanb6016b72005-05-26 13:03:09 -07003552
3553static void
3554bnx2_init_tx_ring(struct bnx2 *bp)
3555{
3556 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003557 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003558
Michael Chan2f8af122006-08-15 01:39:10 -07003559 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3560
Michael Chanb6016b72005-05-26 13:03:09 -07003561 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003562
Michael Chanb6016b72005-05-26 13:03:09 -07003563 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3564 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3565
3566 bp->tx_prod = 0;
3567 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003568 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003569 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003570
Michael Chan59b47d82006-11-19 14:10:45 -08003571 cid = TX_CID;
3572 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3573 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003574
Michael Chan59b47d82006-11-19 14:10:45 -08003575 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003576}
3577
/* Initialize the RX ring(s): size the receive buffers from the current
 * MTU, link the per-page BD chains into a circular list, program the RX
 * context with the chain base address, and pre-fill the ring with
 * freshly allocated skbs before publishing the producer index to the
 * hardware mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every BD in every ring page, then use the last BD of each
	 * page as a chain pointer to the next page (wrapping at the end). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the chain base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive buffers; stop early on allocation failure
	 * and publish however many were filled. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3637
3638static void
Michael Chan13daffa2006-03-20 17:49:20 -08003639bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3640{
3641 u32 num_rings, max;
3642
3643 bp->rx_ring_size = size;
3644 num_rings = 1;
3645 while (size > MAX_RX_DESC_CNT) {
3646 size -= MAX_RX_DESC_CNT;
3647 num_rings++;
3648 }
3649 /* round to next power of 2 */
3650 max = MAX_RX_RINGS;
3651 while ((max & num_rings) == 0)
3652 max >>= 1;
3653
3654 if (num_rings != max)
3655 max <<= 1;
3656
3657 bp->rx_max_ring = max;
3658 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3659}
3660
/* Release every skb still held in the TX ring, unmapping its DMA
 * buffers first.  Each packet occupies one head descriptor plus one
 * descriptor per page fragment, so the index advances by nr_frags + 1
 * after each packet.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: move to the next descriptor. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head descriptor and its fragments. */
		i += j + 1;
	}

}
3697
3698static void
3699bnx2_free_rx_skbs(struct bnx2 *bp)
3700{
3701 int i;
3702
3703 if (bp->rx_buf_ring == NULL)
3704 return;
3705
Michael Chan13daffa2006-03-20 17:49:20 -08003706 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003707 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708 struct sk_buff *skb = rx_buf->skb;
3709
Michael Chan05d0f1c2005-11-04 08:53:48 -08003710 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003711 continue;
3712
3713 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3715
3716 rx_buf->skb = NULL;
3717
Michael Chan745720e2006-06-29 12:37:41 -07003718 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003719 }
3720}
3721
/* Free all skbs held by both the TX and RX rings (used when tearing the
 * NIC down or before re-initializing the rings after a reset). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3728
3729static int
3730bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3731{
3732 int rc;
3733
3734 rc = bnx2_reset_chip(bp, reset_code);
3735 bnx2_free_skbs(bp);
3736 if (rc)
3737 return rc;
3738
Michael Chanfba9fe92006-06-12 22:21:25 -07003739 if ((rc = bnx2_init_chip(bp)) != 0)
3740 return rc;
3741
Michael Chanb6016b72005-05-26 13:03:09 -07003742 bnx2_init_tx_ring(bp);
3743 bnx2_init_rx_ring(bp);
3744 return 0;
3745}
3746
3747static int
3748bnx2_init_nic(struct bnx2 *bp)
3749{
3750 int rc;
3751
3752 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3753 return rc;
3754
Michael Chan80be4432006-11-19 14:07:28 -08003755 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003756 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003757 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003758 bnx2_set_link(bp);
3759 return 0;
3760}
3761
3762static int
3763bnx2_test_registers(struct bnx2 *bp)
3764{
3765 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07003766 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003767 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003768 u16 offset;
3769 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07003770#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07003771 u32 rw_mask;
3772 u32 ro_mask;
3773 } reg_tbl[] = {
3774 { 0x006c, 0, 0x00000000, 0x0000003f },
3775 { 0x0090, 0, 0xffffffff, 0x00000000 },
3776 { 0x0094, 0, 0x00000000, 0x00000000 },
3777
Michael Chan5bae30c2007-05-03 13:18:46 -07003778 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
3779 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3780 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3781 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
3782 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
3783 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3784 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
3785 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3786 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07003787
Michael Chan5bae30c2007-05-03 13:18:46 -07003788 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3789 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3790 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3791 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3792 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3793 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07003794
Michael Chan5bae30c2007-05-03 13:18:46 -07003795 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3796 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
3797 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003798
3799 { 0x1000, 0, 0x00000000, 0x00000001 },
3800 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07003801
3802 { 0x1408, 0, 0x01c00800, 0x00000000 },
3803 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3804 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003805 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003806 { 0x14b0, 0, 0x00000002, 0x00000001 },
3807 { 0x14b8, 0, 0x00000000, 0x00000000 },
3808 { 0x14c0, 0, 0x00000000, 0x00000009 },
3809 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3810 { 0x14cc, 0, 0x00000000, 0x00000001 },
3811 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003812
3813 { 0x1800, 0, 0x00000000, 0x00000001 },
3814 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07003815
3816 { 0x2800, 0, 0x00000000, 0x00000001 },
3817 { 0x2804, 0, 0x00000000, 0x00003f01 },
3818 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3819 { 0x2810, 0, 0xffff0000, 0x00000000 },
3820 { 0x2814, 0, 0xffff0000, 0x00000000 },
3821 { 0x2818, 0, 0xffff0000, 0x00000000 },
3822 { 0x281c, 0, 0xffff0000, 0x00000000 },
3823 { 0x2834, 0, 0xffffffff, 0x00000000 },
3824 { 0x2840, 0, 0x00000000, 0xffffffff },
3825 { 0x2844, 0, 0x00000000, 0xffffffff },
3826 { 0x2848, 0, 0xffffffff, 0x00000000 },
3827 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3828
3829 { 0x2c00, 0, 0x00000000, 0x00000011 },
3830 { 0x2c04, 0, 0x00000000, 0x00030007 },
3831
Michael Chanb6016b72005-05-26 13:03:09 -07003832 { 0x3c00, 0, 0x00000000, 0x00000001 },
3833 { 0x3c04, 0, 0x00000000, 0x00070000 },
3834 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3835 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3836 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3837 { 0x3c14, 0, 0x00000000, 0xffffffff },
3838 { 0x3c18, 0, 0x00000000, 0xffffffff },
3839 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3840 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003841
3842 { 0x5004, 0, 0x00000000, 0x0000007f },
3843 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003844
Michael Chanb6016b72005-05-26 13:03:09 -07003845 { 0x5c00, 0, 0x00000000, 0x00000001 },
3846 { 0x5c04, 0, 0x00000000, 0x0003000f },
3847 { 0x5c08, 0, 0x00000003, 0x00000000 },
3848 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3849 { 0x5c10, 0, 0x00000000, 0xffffffff },
3850 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3851 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3852 { 0x5c88, 0, 0x00000000, 0x00077373 },
3853 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3854
3855 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3856 { 0x680c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6810, 0, 0xffffffff, 0x00000000 },
3858 { 0x6814, 0, 0xffffffff, 0x00000000 },
3859 { 0x6818, 0, 0xffffffff, 0x00000000 },
3860 { 0x681c, 0, 0xffffffff, 0x00000000 },
3861 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3869 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3870 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3871 { 0x684c, 0, 0xffffffff, 0x00000000 },
3872 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3877 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3878
3879 { 0xffff, 0, 0x00000000, 0x00000000 },
3880 };
3881
3882 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07003883 is_5709 = 0;
3884 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3885 is_5709 = 1;
3886
Michael Chanb6016b72005-05-26 13:03:09 -07003887 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3888 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07003889 u16 flags = reg_tbl[i].flags;
3890
3891 if (is_5709 && (flags & BNX2_FL_NOT_5709))
3892 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07003893
3894 offset = (u32) reg_tbl[i].offset;
3895 rw_mask = reg_tbl[i].rw_mask;
3896 ro_mask = reg_tbl[i].ro_mask;
3897
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003898 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003899
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003900 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003901
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003902 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003903 if ((val & rw_mask) != 0) {
3904 goto reg_test_err;
3905 }
3906
3907 if ((val & ro_mask) != (save_val & ro_mask)) {
3908 goto reg_test_err;
3909 }
3910
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003911 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003912
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003913 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003914 if ((val & rw_mask) != rw_mask) {
3915 goto reg_test_err;
3916 }
3917
3918 if ((val & ro_mask) != (save_val & ro_mask)) {
3919 goto reg_test_err;
3920 }
3921
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003922 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003923 continue;
3924
3925reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003926 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003927 ret = -ENODEV;
3928 break;
3929 }
3930 return ret;
3931}
3932
3933static int
3934bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3935{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003936 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003937 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3938 int i;
3939
3940 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3941 u32 offset;
3942
3943 for (offset = 0; offset < size; offset += 4) {
3944
3945 REG_WR_IND(bp, start + offset, test_pattern[i]);
3946
3947 if (REG_RD_IND(bp, start + offset) !=
3948 test_pattern[i]) {
3949 return -ENODEV;
3950 }
3951 }
3952 }
3953 return 0;
3954}
3955
3956static int
3957bnx2_test_memory(struct bnx2 *bp)
3958{
3959 int ret = 0;
3960 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07003961 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07003962 u32 offset;
3963 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07003964 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07003965 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003966 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003967 { 0xe0000, 0x4000 },
3968 { 0x120000, 0x4000 },
3969 { 0x1a0000, 0x4000 },
3970 { 0x160000, 0x4000 },
3971 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07003972 },
3973 mem_tbl_5709[] = {
3974 { 0x60000, 0x4000 },
3975 { 0xa0000, 0x3000 },
3976 { 0xe0000, 0x4000 },
3977 { 0x120000, 0x4000 },
3978 { 0x1a0000, 0x4000 },
3979 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07003980 };
Michael Chan5bae30c2007-05-03 13:18:46 -07003981 struct mem_entry *mem_tbl;
3982
3983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3984 mem_tbl = mem_tbl_5709;
3985 else
3986 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07003987
3988 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3989 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3990 mem_tbl[i].len)) != 0) {
3991 return ret;
3992 }
3993 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003994
Michael Chanb6016b72005-05-26 13:03:09 -07003995 return ret;
3996}
3997
/* Mode selectors for bnx2_run_loopback(): loop the test frame back at
 * the MAC or at the PHY.
 */
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1
4000
/* Send one 1514-byte test frame through the chip with either MAC- or
 * PHY-level loopback engaged and verify it arrives intact on the RX
 * ring.  Returns 0 if the frame made the round trip with a matching
 * payload; -EINVAL for an unknown mode, -ENOMEM if no skb could be
 * allocated, -ENODEV on any TX/RX/payload mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        /* Engage the requested loopback point in the hardware. */
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: our MAC address, zero padding, then a
         * deterministic byte ramp (i & 0xff) used to verify the payload.
         */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* COAL_NOW_WO_INT forces a status block update (no interrupt) so
         * the ring indices sampled below are current.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post a single TX descriptor for the whole frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        udelay(100);

        /* Force another status block update to pick up the completion. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* TX must have completed exactly our one descriptor... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and RX must have received exactly num_pkts frames. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The l2_fhdr the chip wrote sits at the head of the RX buffer. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: pkt_len includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload byte ramp written above. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4119
/* Result bits returned by bnx2_test_loopback(); one bit per failed mode. */
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)
4124
4125static int
4126bnx2_test_loopback(struct bnx2 *bp)
4127{
4128 int rc = 0;
4129
4130 if (!netif_running(bp->dev))
4131 return BNX2_LOOPBACK_FAILED;
4132
4133 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4134 spin_lock_bh(&bp->phy_lock);
4135 bnx2_init_phy(bp);
4136 spin_unlock_bh(&bp->phy_lock);
4137 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4138 rc |= BNX2_MAC_LOOPBACK_FAILED;
4139 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4140 rc |= BNX2_PHY_LOOPBACK_FAILED;
4141 return rc;
4142}
4143
/* Size of each NVRAM configuration block verified by bnx2_test_nvram(),
 * and the CRC-32 residual expected of a block whose trailing CRC is
 * valid.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
4146
4147static int
4148bnx2_test_nvram(struct bnx2 *bp)
4149{
4150 u32 buf[NVRAM_SIZE / 4];
4151 u8 *data = (u8 *) buf;
4152 int rc = 0;
4153 u32 magic, csum;
4154
4155 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4156 goto test_nvram_done;
4157
4158 magic = be32_to_cpu(buf[0]);
4159 if (magic != 0x669955aa) {
4160 rc = -ENODEV;
4161 goto test_nvram_done;
4162 }
4163
4164 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4165 goto test_nvram_done;
4166
4167 csum = ether_crc_le(0x100, data);
4168 if (csum != CRC32_RESIDUAL) {
4169 rc = -ENODEV;
4170 goto test_nvram_done;
4171 }
4172
4173 csum = ether_crc_le(0x100, data + 0x100);
4174 if (csum != CRC32_RESIDUAL) {
4175 rc = -ENODEV;
4176 }
4177
4178test_nvram_done:
4179 return rc;
4180}
4181
4182static int
4183bnx2_test_link(struct bnx2 *bp)
4184{
4185 u32 bmsr;
4186
Michael Chanc770a652005-08-25 15:38:39 -07004187 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004188 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4189 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004190 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004191
Michael Chanb6016b72005-05-26 13:03:09 -07004192 if (bmsr & BMSR_LSTATUS) {
4193 return 0;
4194 }
4195 return -ENODEV;
4196}
4197
4198static int
4199bnx2_test_intr(struct bnx2 *bp)
4200{
4201 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004202 u16 status_idx;
4203
4204 if (!netif_running(bp->dev))
4205 return -ENODEV;
4206
4207 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4208
4209 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004210 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004211 REG_RD(bp, BNX2_HC_COMMAND);
4212
4213 for (i = 0; i < 10; i++) {
4214 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4215 status_idx) {
4216
4217 break;
4218 }
4219
4220 msleep_interruptible(10);
4221 }
4222 if (i < 10)
4223 return 0;
4224
4225 return -ENODEV;
4226}
4227
/* Timer-driven link maintenance for the 5706 SerDes PHY.
 *
 * While autoneg is selected but the link is down: if the PHY sees a
 * signal without autoneg CONFIG from the partner, force 1000/full
 * (parallel detect, tracked via PHY_PARALLEL_DETECT_FLAG).  Once up in
 * parallel-detect mode, re-enable autoneg as soon as CONFIG is seen.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Still waiting out a previously started autoneg; tick down. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, MII_BMCR, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Vendor-specific register access: write the
                         * selector (0x1c / 0x17), then read the value.
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* NOTE(review): the double write/read of 0x15 looks
                         * like a deliberate latched-register refresh — confirm
                         * against the PHY datasheet before changing.
                         */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is not autonegotiating: force
                                 * 1000 Mbps full duplex.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, MII_BMCR, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Up via parallel detect: if CONFIG now appears, the
                 * partner can autonegotiate — switch back to autoneg.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4282
/* Timer-driven link maintenance for the 5708 SerDes PHY.
 *
 * Only active when the PHY is 2.5G-capable.  While autoneg is selected
 * but the link stays down, alternate between forcing 2.5G full duplex
 * and re-enabling autoneg so that either kind of link partner can
 * eventually connect.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Give a previously restarted autoneg time to complete. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, MII_BMCR, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg has not brought the link up: try forcing
                         * 2.5 Gbps full duplex, rechecking after the
                         * forced-mode interval.
                         */
                        bmcr &= ~BMCR_ANENABLE;
                        bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode did not link either: go back to
                         * autoneg and skip the next two timer ticks.
                         */
                        bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4317
/* Driver heartbeat timer callback.
 *
 * Sends the periodic driver pulse to the bootcode, copies the firmware
 * RX drop counter into the stats block, and runs the chip-specific
 * SerDes link state machine.  Re-arms itself with current_interval
 * unless the device has been brought down; the hardware work is skipped
 * (but the timer still re-armed) while interrupts are gated by
 * intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;
        u32 msg;

        if (!netif_running(bp->dev))
                return;

        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Heartbeat: write an incrementing sequence to the pulse mailbox. */
        msg = (u32) ++bp->fw_drv_pulse_wr_seq;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4345
4346/* Called with rtnl_lock */
4347static int
4348bnx2_open(struct net_device *dev)
4349{
Michael Chan972ec0d2006-01-23 16:12:43 -08004350 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004351 int rc;
4352
Michael Chan1b2f9222007-05-03 13:20:19 -07004353 netif_carrier_off(dev);
4354
Pavel Machek829ca9a2005-09-03 15:56:56 -07004355 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004356 bnx2_disable_int(bp);
4357
4358 rc = bnx2_alloc_mem(bp);
4359 if (rc)
4360 return rc;
4361
4362 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4363 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4364 !disable_msi) {
4365
4366 if (pci_enable_msi(bp->pdev) == 0) {
4367 bp->flags |= USING_MSI_FLAG;
4368 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4369 dev);
4370 }
4371 else {
4372 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004373 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004374 }
4375 }
4376 else {
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004377 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
Michael Chanb6016b72005-05-26 13:03:09 -07004378 dev->name, dev);
4379 }
4380 if (rc) {
4381 bnx2_free_mem(bp);
4382 return rc;
4383 }
4384
4385 rc = bnx2_init_nic(bp);
4386
4387 if (rc) {
4388 free_irq(bp->pdev->irq, dev);
4389 if (bp->flags & USING_MSI_FLAG) {
4390 pci_disable_msi(bp->pdev);
4391 bp->flags &= ~USING_MSI_FLAG;
4392 }
4393 bnx2_free_skbs(bp);
4394 bnx2_free_mem(bp);
4395 return rc;
4396 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004397
Michael Chancd339a02005-08-25 15:35:24 -07004398 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004399
4400 atomic_set(&bp->intr_sem, 0);
4401
4402 bnx2_enable_int(bp);
4403
4404 if (bp->flags & USING_MSI_FLAG) {
4405 /* Test MSI to make sure it is working
4406 * If MSI test fails, go back to INTx mode
4407 */
4408 if (bnx2_test_intr(bp) != 0) {
4409 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4410 " using MSI, switching to INTx mode. Please"
4411 " report this failure to the PCI maintainer"
4412 " and include system chipset information.\n",
4413 bp->dev->name);
4414
4415 bnx2_disable_int(bp);
4416 free_irq(bp->pdev->irq, dev);
4417 pci_disable_msi(bp->pdev);
4418 bp->flags &= ~USING_MSI_FLAG;
4419
4420 rc = bnx2_init_nic(bp);
4421
4422 if (!rc) {
4423 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004424 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004425 }
4426 if (rc) {
4427 bnx2_free_skbs(bp);
4428 bnx2_free_mem(bp);
4429 del_timer_sync(&bp->timer);
4430 return rc;
4431 }
4432 bnx2_enable_int(bp);
4433 }
4434 }
4435 if (bp->flags & USING_MSI_FLAG) {
4436 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4437 }
4438
4439 netif_start_queue(dev);
4440
4441 return 0;
4442}
4443
/* Deferred reset handler (workqueue context), scheduled from
 * bnx2_tx_timeout().  Quiesces the interface, re-initializes the NIC,
 * and restarts it.  in_reset_task is polled by bnx2_close() so close
 * can wait for this to finish without flush_scheduled_work().
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* NOTE(review): intr_sem is set to 1 (not 0) before restarting —
         * presumably so the next interrupt path re-enables ints; confirm.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
4461
/* net_device watchdog hook: TX has stalled.  Defer the actual reset to
 * the workqueue (bnx2_reset_task) rather than resetting in this context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
4470
4471#ifdef BCM_VLAN
4472/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce the device while swapping in the new VLAN group, then
         * reprogram the RX mode to match it.
         */
        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4485
4486/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce, drop the per-VID device entry from the group, and
         * reprogram the RX mode without it.
         */
        bnx2_netif_stop(bp);
        vlan_group_set_device(bp->vlgrp, vid, NULL);
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4498#endif
4499
Herbert Xu932ff272006-06-09 12:20:56 -07004500/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004501 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4502 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004503 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* The queue should have been stopped before the ring filled up;
         * hitting this means the wake/stop accounting is broken.
         */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        /* Build the per-packet flags: checksum offload, VLAN tag, LSO. */
        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                /* Headers are rewritten below, so we need a private copy. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = 0;
                if (tcp_hdr(skb)->doff > 5)
                        tcp_opt_len = tcp_optlen(skb);

                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Prime the IP/TCP headers with per-segment values; the
                 * chip fixes up the checksums for each emitted segment.
                 */
                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                /* Encode IP and TCP option lengths (in words) for the chip. */
                if (tcp_opt_len || (iph->ihl > 5)) {
                        vlan_tag_flags |= ((iph->ihl - 5) +
                                           (tcp_opt_len >> 2)) << 8;
                }
        }
        else
        {
                mss = 0;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        /* First descriptor covers the linear part of the skb. */
        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &bp->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        /* One additional descriptor per page fragment. */
        last_frag = skb_shinfo(skb)->nr_frags;

        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last descriptor of the packet. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: index first, then byte-sequence register. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when nearly full; re-check to close the race
         * with a concurrent bnx2_tx_int() completion.
         */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
4625
4626/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Pick the unload reason so the bootcode arms (or skips)
         * wake-on-LAN accordingly.
         */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Drop to low power until the next open. */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4661
/* Fetch a 64-bit hardware statistics counter stored as a _hi/_lo word
 * pair.  On 64-bit hosts both halves are combined; on 32-bit hosts only
 * the low word is used (net_device_stats fields are unsigned long).
 *
 * Fix: the GET_NET_STATS64 expansion was not parenthesized as a whole,
 * so using it in a non-additive expression context would mis-associate;
 * wrap the full expansion in parentheses (CERT PRE02-C).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4674
4675static struct net_device_stats *
4676bnx2_get_stats(struct net_device *dev)
4677{
Michael Chan972ec0d2006-01-23 16:12:43 -08004678 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004679 struct statistics_block *stats_blk = bp->stats_blk;
4680 struct net_device_stats *net_stats = &bp->net_stats;
4681
4682 if (bp->stats_blk == NULL) {
4683 return net_stats;
4684 }
4685 net_stats->rx_packets =
4686 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4687 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4688 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4689
4690 net_stats->tx_packets =
4691 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4692 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4693 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4694
4695 net_stats->rx_bytes =
4696 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4697
4698 net_stats->tx_bytes =
4699 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4700
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004701 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004702 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4703
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004704 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004705 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4706
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004707 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004708 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4709 stats_blk->stat_EtherStatsOverrsizePkts);
4710
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004711 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004712 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4713
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004714 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004715 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4716
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004717 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004718 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4719
4720 net_stats->rx_errors = net_stats->rx_length_errors +
4721 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4722 net_stats->rx_crc_errors;
4723
4724 net_stats->tx_aborted_errors =
4725 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4726 stats_blk->stat_Dot3StatsLateCollisions);
4727
Michael Chan5b0c76a2005-11-04 08:45:49 -08004728 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4729 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07004730 net_stats->tx_carrier_errors = 0;
4731 else {
4732 net_stats->tx_carrier_errors =
4733 (unsigned long)
4734 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4735 }
4736
4737 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004738 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07004739 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4740 +
4741 net_stats->tx_aborted_errors +
4742 net_stats->tx_carrier_errors;
4743
Michael Chancea94db2006-06-12 22:16:13 -07004744 net_stats->rx_missed_errors =
4745 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4746 stats_blk->stat_FwRxDrop);
4747
Michael Chanb6016b72005-05-26 13:03:09 -07004748 return net_stats;
4749}
4750
4751/* All ethtool functions called with rtnl_lock */
4752
4753static int
4754bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4755{
Michael Chan972ec0d2006-01-23 16:12:43 -08004756 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004757
4758 cmd->supported = SUPPORTED_Autoneg;
4759 if (bp->phy_flags & PHY_SERDES_FLAG) {
4760 cmd->supported |= SUPPORTED_1000baseT_Full |
4761 SUPPORTED_FIBRE;
4762
4763 cmd->port = PORT_FIBRE;
4764 }
4765 else {
4766 cmd->supported |= SUPPORTED_10baseT_Half |
4767 SUPPORTED_10baseT_Full |
4768 SUPPORTED_100baseT_Half |
4769 SUPPORTED_100baseT_Full |
4770 SUPPORTED_1000baseT_Full |
4771 SUPPORTED_TP;
4772
4773 cmd->port = PORT_TP;
4774 }
4775
4776 cmd->advertising = bp->advertising;
4777
4778 if (bp->autoneg & AUTONEG_SPEED) {
4779 cmd->autoneg = AUTONEG_ENABLE;
4780 }
4781 else {
4782 cmd->autoneg = AUTONEG_DISABLE;
4783 }
4784
4785 if (netif_carrier_ok(dev)) {
4786 cmd->speed = bp->line_speed;
4787 cmd->duplex = bp->duplex;
4788 }
4789 else {
4790 cmd->speed = -1;
4791 cmd->duplex = -1;
4792 }
4793
4794 cmd->transceiver = XCVR_INTERNAL;
4795 cmd->phy_address = bp->phy_addr;
4796
4797 return 0;
4798}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004799
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed to *bp until all
	 * of the requested parameters have been validated.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds only make sense on copper. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 Mb half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Any other mask: advertise everything the media
			 * supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex requested. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes only supports full duplex at 1G or 2.5G,
			 * and 2.5G only on capable PHYs.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 Mb is not allowed on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	/* phy_lock serializes against the link-state timer/interrupt. */
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4875
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	/* Build "X.Y.Z" from the packed firmware version word.  Each byte
	 * is converted with '+ 0x30'; this assumes every component is a
	 * single digit 0-9 — NOTE(review): components >= 10 would produce
	 * non-digit characters; confirm against the firmware versioning.
	 */
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
4890
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4898
4899static void
4900bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4901{
4902 u32 *p = _p, i, offset;
4903 u8 *orig_p = _p;
4904 struct bnx2 *bp = netdev_priv(dev);
4905 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4906 0x0800, 0x0880, 0x0c00, 0x0c10,
4907 0x0c30, 0x0d08, 0x1000, 0x101c,
4908 0x1040, 0x1048, 0x1080, 0x10a4,
4909 0x1400, 0x1490, 0x1498, 0x14f0,
4910 0x1500, 0x155c, 0x1580, 0x15dc,
4911 0x1600, 0x1658, 0x1680, 0x16d8,
4912 0x1800, 0x1820, 0x1840, 0x1854,
4913 0x1880, 0x1894, 0x1900, 0x1984,
4914 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4915 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4916 0x2000, 0x2030, 0x23c0, 0x2400,
4917 0x2800, 0x2820, 0x2830, 0x2850,
4918 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4919 0x3c00, 0x3c94, 0x4000, 0x4010,
4920 0x4080, 0x4090, 0x43c0, 0x4458,
4921 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4922 0x4fc0, 0x5010, 0x53c0, 0x5444,
4923 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4924 0x5fc0, 0x6000, 0x6400, 0x6428,
4925 0x6800, 0x6848, 0x684c, 0x6860,
4926 0x6888, 0x6910, 0x8000 };
4927
4928 regs->version = 0;
4929
4930 memset(p, 0, BNX2_REGDUMP_LEN);
4931
4932 if (!netif_running(bp->dev))
4933 return;
4934
4935 i = 0;
4936 offset = reg_boundaries[0];
4937 p += offset;
4938 while (offset < BNX2_REGDUMP_LEN) {
4939 *p++ = REG_RD(bp, offset);
4940 offset += 4;
4941 if (offset == reg_boundaries[i + 1]) {
4942 offset = reg_boundaries[i + 2];
4943 p = (u32 *) (orig_p + offset);
4944 i += 2;
4945 }
4946 }
4947}
4948
Michael Chanb6016b72005-05-26 13:03:09 -07004949static void
4950bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4951{
Michael Chan972ec0d2006-01-23 16:12:43 -08004952 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004953
4954 if (bp->flags & NO_WOL_FLAG) {
4955 wol->supported = 0;
4956 wol->wolopts = 0;
4957 }
4958 else {
4959 wol->supported = WAKE_MAGIC;
4960 if (bp->wol)
4961 wol->wolopts = WAKE_MAGIC;
4962 else
4963 wol->wolopts = 0;
4964 }
4965 memset(&wol->sopass, 0, sizeof(wol->sopass));
4966}
4967
4968static int
4969bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4970{
Michael Chan972ec0d2006-01-23 16:12:43 -08004971 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004972
4973 if (wol->wolopts & ~WAKE_MAGIC)
4974 return -EINVAL;
4975
4976 if (wol->wolopts & WAKE_MAGIC) {
4977 if (bp->flags & NO_WOL_FLAG)
4978 return -EINVAL;
4979
4980 bp->wol = 1;
4981 }
4982 else {
4983 bp->wol = 0;
4984 }
4985 return 0;
4986}
4987
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Restarting autoneg only makes sense if autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping so the link partner has
		 * time to see the link go down.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handling in the timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5022
5023static int
5024bnx2_get_eeprom_len(struct net_device *dev)
5025{
Michael Chan972ec0d2006-01-23 16:12:43 -08005026 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005027
Michael Chan1122db72006-01-23 16:11:42 -08005028 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005029 return 0;
5030
Michael Chan1122db72006-01-23 16:11:42 -08005031 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005032}
5033
5034static int
5035bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5036 u8 *eebuf)
5037{
Michael Chan972ec0d2006-01-23 16:12:43 -08005038 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005039 int rc;
5040
John W. Linville1064e942005-11-10 12:58:24 -08005041 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005042
5043 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5044
5045 return rc;
5046}
5047
5048static int
5049bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5050 u8 *eebuf)
5051{
Michael Chan972ec0d2006-01-23 16:12:43 -08005052 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005053 int rc;
5054
John W. Linville1064e942005-11-10 12:58:24 -08005055 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005056
5057 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5058
5059 return rc;
5060}
5061
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Report the currently programmed interrupt coalescing values;
	 * fields this chip does not support are left zero.
	 */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5083
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Each value is clamped to the range the hardware registers can
	 * hold: tick counts to 10 bits (0x3ff), frame counts to 8 bits.
	 */
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Stats ticks: clamp to max and mask to the register granularity. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Re-initialize the NIC so the new values take effect. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5127
5128static void
5129bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5130{
Michael Chan972ec0d2006-01-23 16:12:43 -08005131 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005132
Michael Chan13daffa2006-03-20 17:49:20 -08005133 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005134 ering->rx_mini_max_pending = 0;
5135 ering->rx_jumbo_max_pending = 0;
5136
5137 ering->rx_pending = bp->rx_ring_size;
5138 ering->rx_mini_pending = 0;
5139 ering->rx_jumbo_pending = 0;
5140
5141 ering->tx_max_pending = MAX_TX_DESC_CNT;
5142 ering->tx_pending = bp->tx_ring_size;
5143}
5144
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The TX ring must be able to hold at least one maximally
	 * fragmented skb (MAX_SKB_FRAGS descriptors plus one).
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Tear down the existing rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings allocated; recovery requires a
		 * close/reopen — confirm this is the intended policy.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5178
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Report flow-control autoneg and the active RX/TX pause state. */
	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
5188
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Record the requested flow-control configuration. */
	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	/* Reprogram the PHY under phy_lock to apply the new settings. */
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5215
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Return the current RX checksum offload setting. */
	return bp->rx_csum;
}
5223
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Toggle RX checksum offload; takes effect on the RX path. */
	bp->rx_csum = data;
	return 0;
}
5232
Michael Chanb11d6212006-06-29 12:31:21 -07005233static int
5234bnx2_set_tso(struct net_device *dev, u32 data)
5235{
5236 if (data)
5237 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5238 else
5239 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5240 return 0;
5241}
5242
/* Number of ethtool statistics.  The four tables below are indexed in
 * lock-step: bnx2_stats_str_arr (names), bnx2_stats_offset_arr
 * (hardware offsets) and the per-chip *_stats_len_arr tables (counter
 * widths) must all stay in the same order.
 */
#define BNX2_NUM_STATS 46

/* Counter names reported through ETH_SS_STATS. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* 32-bit-word offset of a counter within the hardware stats block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset of each counter in the statistics block, in the same order
 * as bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip). */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* 5708 widths: carrier-sense counter is valid on this chip. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Number of ethtool self-tests. */
#define BNX2_NUM_TESTS 6

/* Test names reported through ETH_SS_TEST. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5378
static int
bnx2_self_test_count(struct net_device *dev)
{
	/* Fixed number of tests; matches bnx2_tests_str_arr. */
	return BNX2_NUM_TESTS;
}
5384
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access: stop traffic and
		 * put the chip into diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the offline tests. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run with the device fully operational. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5440
5441static void
5442bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5443{
5444 switch (stringset) {
5445 case ETH_SS_STATS:
5446 memcpy(buf, bnx2_stats_str_arr,
5447 sizeof(bnx2_stats_str_arr));
5448 break;
5449 case ETH_SS_TEST:
5450 memcpy(buf, bnx2_tests_str_arr,
5451 sizeof(bnx2_tests_str_arr));
5452 break;
5453 }
5454}
5455
static int
bnx2_get_stats_count(struct net_device *dev)
{
	/* Fixed count; matches the statistics tables above. */
	return BNX2_NUM_STATS;
}
5461
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 steppings have counter errata; pick the
	 * width table that skips the affected counters.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5502
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	/* ethtool passes 0 for "default duration"; blink for 2 seconds. */
	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs, remembering the old mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate all-LEDs-off / all-LEDs-on every 500 ms. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Allow the user to interrupt a long blink sequence. */
		if (signal_pending(current))
			break;
	}
	/* Restore automatic LED control. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5536
/* ethtool entry points; the ethtool core calls these with rtnl_lock
 * held (see the comment preceding bnx2_get_settings above).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5572
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY access requires a running (initialized) device. */
		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MII access with the link code. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5620
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	/* Reject multicast/zero addresses. */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	/* Program the new address into the chip only if it is up. */
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
5637
5638/* Called with rtnl_lock */
5639static int
5640bnx2_change_mtu(struct net_device *dev, int new_mtu)
5641{
Michael Chan972ec0d2006-01-23 16:12:43 -08005642 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005643
5644 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5645 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5646 return -EINVAL;
5647
5648 dev->mtu = new_mtu;
5649 if (netif_running(dev)) {
5650 bnx2_netif_stop(bp);
5651
5652 bnx2_init_nic(bp);
5653
5654 bnx2_netif_start(bp);
5655 }
5656 return 0;
5657}
5658
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the device
 * IRQ masked, so netconsole/kgdboe can drain the rings when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5670
Michael Chan253c8b72007-01-08 19:56:01 -08005671static void __devinit
5672bnx2_get_5709_media(struct bnx2 *bp)
5673{
5674 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5675 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5676 u32 strap;
5677
5678 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5679 return;
5680 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5681 bp->phy_flags |= PHY_SERDES_FLAG;
5682 return;
5683 }
5684
5685 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5686 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5687 else
5688 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5689
5690 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5691 switch (strap) {
5692 case 0x4:
5693 case 0x5:
5694 case 0x6:
5695 bp->phy_flags |= PHY_SERDES_FLAG;
5696 return;
5697 }
5698 } else {
5699 switch (strap) {
5700 case 0x1:
5701 case 0x2:
5702 case 0x4:
5703 bp->phy_flags |= PHY_SERDES_FLAG;
5704 return;
5705 }
5706 }
5707}
5708
Michael Chanb6016b72005-05-26 13:03:09 -07005709static int __devinit
5710bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5711{
5712 struct bnx2 *bp;
5713 unsigned long mem_len;
5714 int rc;
5715 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07005716 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07005717
5718 SET_MODULE_OWNER(dev);
5719 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005720 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005721
5722 bp->flags = 0;
5723 bp->phy_flags = 0;
5724
5725 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5726 rc = pci_enable_device(pdev);
5727 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005728 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005729 goto err_out;
5730 }
5731
5732 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005733 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005734 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005735 rc = -ENODEV;
5736 goto err_out_disable;
5737 }
5738
5739 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5740 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005741 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005742 goto err_out_disable;
5743 }
5744
5745 pci_set_master(pdev);
5746
5747 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5748 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005749 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005750 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005751 rc = -EIO;
5752 goto err_out_release;
5753 }
5754
Michael Chanb6016b72005-05-26 13:03:09 -07005755 bp->dev = dev;
5756 bp->pdev = pdev;
5757
5758 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005759 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005760
5761 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005762 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005763 dev->mem_end = dev->mem_start + mem_len;
5764 dev->irq = pdev->irq;
5765
5766 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5767
5768 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005769 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005770 rc = -ENOMEM;
5771 goto err_out_release;
5772 }
5773
5774 /* Configure byte swap and enable write to the reg_window registers.
5775 * Rely on CPU to do target byte swapping on big endian systems
5776 * The chip's target access swapping will not swap all accesses
5777 */
5778 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5779 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5780 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5781
Pavel Machek829ca9a2005-09-03 15:56:56 -07005782 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005783
5784 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5785
Michael Chan59b47d82006-11-19 14:10:45 -08005786 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5787 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5788 if (bp->pcix_cap == 0) {
5789 dev_err(&pdev->dev,
5790 "Cannot find PCIX capability, aborting.\n");
5791 rc = -EIO;
5792 goto err_out_unmap;
5793 }
5794 }
5795
Michael Chan40453c82007-05-03 13:19:18 -07005796 /* 5708 cannot support DMA addresses > 40-bit. */
5797 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5798 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5799 else
5800 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5801
5802 /* Configure DMA attributes. */
5803 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5804 dev->features |= NETIF_F_HIGHDMA;
5805 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5806 if (rc) {
5807 dev_err(&pdev->dev,
5808 "pci_set_consistent_dma_mask failed, aborting.\n");
5809 goto err_out_unmap;
5810 }
5811 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5812 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5813 goto err_out_unmap;
5814 }
5815
Michael Chanb6016b72005-05-26 13:03:09 -07005816 /* Get bus information. */
5817 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5818 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5819 u32 clkreg;
5820
5821 bp->flags |= PCIX_FLAG;
5822
5823 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005824
Michael Chanb6016b72005-05-26 13:03:09 -07005825 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5826 switch (clkreg) {
5827 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5828 bp->bus_speed_mhz = 133;
5829 break;
5830
5831 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5832 bp->bus_speed_mhz = 100;
5833 break;
5834
5835 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5836 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5837 bp->bus_speed_mhz = 66;
5838 break;
5839
5840 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5841 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5842 bp->bus_speed_mhz = 50;
5843 break;
5844
5845 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5846 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5847 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5848 bp->bus_speed_mhz = 33;
5849 break;
5850 }
5851 }
5852 else {
5853 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5854 bp->bus_speed_mhz = 66;
5855 else
5856 bp->bus_speed_mhz = 33;
5857 }
5858
5859 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5860 bp->flags |= PCI_32BIT_FLAG;
5861
5862 /* 5706A0 may falsely detect SERR and PERR. */
5863 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5864 reg = REG_RD(bp, PCI_COMMAND);
5865 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5866 REG_WR(bp, PCI_COMMAND, reg);
5867 }
5868 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5869 !(bp->flags & PCIX_FLAG)) {
5870
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005871 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005872 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005873 goto err_out_unmap;
5874 }
5875
5876 bnx2_init_nvram(bp);
5877
Michael Chane3648b32005-11-04 08:51:21 -08005878 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5879
5880 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08005881 BNX2_SHM_HDR_SIGNATURE_SIG) {
5882 u32 off = PCI_FUNC(pdev->devfn) << 2;
5883
5884 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5885 } else
Michael Chane3648b32005-11-04 08:51:21 -08005886 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5887
Michael Chanb6016b72005-05-26 13:03:09 -07005888 /* Get the permanent MAC address. First we need to make sure the
5889 * firmware is actually running.
5890 */
Michael Chane3648b32005-11-04 08:51:21 -08005891 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005892
5893 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5894 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005895 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005896 rc = -ENODEV;
5897 goto err_out_unmap;
5898 }
5899
Michael Chane3648b32005-11-04 08:51:21 -08005900 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005901
Michael Chane3648b32005-11-04 08:51:21 -08005902 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005903 bp->mac_addr[0] = (u8) (reg >> 8);
5904 bp->mac_addr[1] = (u8) reg;
5905
Michael Chane3648b32005-11-04 08:51:21 -08005906 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005907 bp->mac_addr[2] = (u8) (reg >> 24);
5908 bp->mac_addr[3] = (u8) (reg >> 16);
5909 bp->mac_addr[4] = (u8) (reg >> 8);
5910 bp->mac_addr[5] = (u8) reg;
5911
5912 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005913 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005914
5915 bp->rx_csum = 1;
5916
5917 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5918
5919 bp->tx_quick_cons_trip_int = 20;
5920 bp->tx_quick_cons_trip = 20;
5921 bp->tx_ticks_int = 80;
5922 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005923
Michael Chanb6016b72005-05-26 13:03:09 -07005924 bp->rx_quick_cons_trip_int = 6;
5925 bp->rx_quick_cons_trip = 6;
5926 bp->rx_ticks_int = 18;
5927 bp->rx_ticks = 18;
5928
5929 bp->stats_ticks = 1000000 & 0xffff00;
5930
5931 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005932 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005933
Michael Chan5b0c76a2005-11-04 08:45:49 -08005934 bp->phy_addr = 1;
5935
Michael Chanb6016b72005-05-26 13:03:09 -07005936 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08005937 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5938 bnx2_get_5709_media(bp);
5939 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005940 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005941
5942 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005943 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005944 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005945 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005946 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005947 BNX2_SHARED_HW_CFG_CONFIG);
5948 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5949 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5950 }
Michael Chan261dd5c2007-01-08 19:55:46 -08005951 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5952 CHIP_NUM(bp) == CHIP_NUM_5708)
5953 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08005954 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5955 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005956
Michael Chan16088272006-06-12 22:16:43 -07005957 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5958 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5959 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005960 bp->flags |= NO_WOL_FLAG;
5961
Michael Chanb6016b72005-05-26 13:03:09 -07005962 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5963 bp->tx_quick_cons_trip_int =
5964 bp->tx_quick_cons_trip;
5965 bp->tx_ticks_int = bp->tx_ticks;
5966 bp->rx_quick_cons_trip_int =
5967 bp->rx_quick_cons_trip;
5968 bp->rx_ticks_int = bp->rx_ticks;
5969 bp->comp_prod_trip_int = bp->comp_prod_trip;
5970 bp->com_ticks_int = bp->com_ticks;
5971 bp->cmd_ticks_int = bp->cmd_ticks;
5972 }
5973
Michael Chanf9317a42006-09-29 17:06:23 -07005974 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5975 *
5976 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5977 * with byte enables disabled on the unused 32-bit word. This is legal
5978 * but causes problems on the AMD 8132 which will eventually stop
5979 * responding after a while.
5980 *
5981 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11005982 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07005983 */
5984 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5985 struct pci_dev *amd_8132 = NULL;
5986
5987 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5988 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5989 amd_8132))) {
5990 u8 rev;
5991
5992 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5993 if (rev >= 0x10 && rev <= 0x13) {
5994 disable_msi = 1;
5995 pci_dev_put(amd_8132);
5996 break;
5997 }
5998 }
5999 }
6000
Michael Chanb6016b72005-05-26 13:03:09 -07006001 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6002 bp->req_line_speed = 0;
6003 if (bp->phy_flags & PHY_SERDES_FLAG) {
6004 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006005
Michael Chane3648b32005-11-04 08:51:21 -08006006 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006007 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6008 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6009 bp->autoneg = 0;
6010 bp->req_line_speed = bp->line_speed = SPEED_1000;
6011 bp->req_duplex = DUPLEX_FULL;
6012 }
Michael Chanb6016b72005-05-26 13:03:09 -07006013 }
6014 else {
6015 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6016 }
6017
6018 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6019
Michael Chancd339a02005-08-25 15:35:24 -07006020 init_timer(&bp->timer);
6021 bp->timer.expires = RUN_AT(bp->timer_interval);
6022 bp->timer.data = (unsigned long) bp;
6023 bp->timer.function = bnx2_timer;
6024
Michael Chanb6016b72005-05-26 13:03:09 -07006025 return 0;
6026
6027err_out_unmap:
6028 if (bp->regview) {
6029 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006030 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006031 }
6032
6033err_out_release:
6034 pci_release_regions(pdev);
6035
6036err_out_disable:
6037 pci_disable_device(pdev);
6038 pci_set_drvdata(pdev, NULL);
6039
6040err_out:
6041 return rc;
6042}
6043
/* PCI probe entry point: allocate the net_device, run board init,
 * wire up the net_device operations, and register with the network
 * stack.  On any failure all resources acquired so far are released
 * and a negative errno is returned.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	/* Print the driver banner only once, on the first probe. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board unwound its own resources on failure. */
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	/* drvdata must be set before register_netdev(); callbacks can
	 * fire as soon as the device is registered.
	 */
	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Manual unwind of everything bnx2_init_board acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6133
/* PCI remove entry point.  Teardown order matters: pending reset work
 * must finish before the netdev disappears, and the netdev must be
 * unregistered before its register mapping and PCI resources go away.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any queued bnx2_reset_task to complete. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6152
/* PM suspend hook: quiesce the interface, pick a firmware reset code
 * based on Wake-on-LAN capability/configuration, reset the chip, and
 * drop to the requested low-power state.  No-op if the interface is
 * down.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Make sure no reset_task is still running before stopping. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how to park the chip: link-down if WOL is
	 * impossible, WOL-armed if the user enabled it, else plain
	 * suspend without wake support.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6178
/* PM resume hook: restore full power, re-initialize the chip and
 * restart the interface.  No-op if the interface was down at suspend.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Power must be restored before any register access. */
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6194
/* PCI driver descriptor: binds the probe/remove and power-management
 * callbacks above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6203
/* Module load: register the PCI driver; probing happens per-device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6208
/* Module unload: unregister the driver, triggering remove for all
 * bound devices.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6213
/* Hook the init/exit functions into the module load/unload sequence. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6216
6217
6218