blob: a63431526ce4c050dffe1bffa247f23bd3e560a7 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chan68c9f752007-04-24 15:35:53 -070057#define DRV_MODULE_VERSION "1.5.8"
58#define DRV_MODULE_RELDATE "April 24, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board index; used both to index board_info[] and as the driver_data
 * value in bnx2_pci_tbl below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;
88
/* indexed by board_t, above */
/* Human-readable adapter names printed at probe time. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
102
/* PCI IDs this driver binds to.  The HP OEM entries (subvendor-specific)
 * must precede the generic PCI_ANY_ID entries for the same device ID so
 * they match first.  driver_data carries the board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
122
/* NVRAM device descriptor table.  Each entry's first word encodes the
 * chip strapping value that selects it; the remaining words are NVRAM
 * controller configuration/command values, followed by page geometry,
 * address mask, total size, and a name.  NOTE(review): the strap-matching
 * is presumably done by the NVRAM init code elsewhere in this file —
 * not visible in this chunk.  Do not reorder entries or alter the hex
 * constants; they mirror hardware strapping encodings.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
209
210MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
Michael Chane89bbf12005-08-25 15:36:58 -0700212static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213{
Michael Chan2f8af122006-08-15 01:39:10 -0700214 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700215
Michael Chan2f8af122006-08-15 01:39:10 -0700216 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800217
218 /* The ring uses 256 indices for 255 entries, one of them
219 * needs to be skipped.
220 */
221 diff = bp->tx_prod - bp->tx_cons;
222 if (unlikely(diff >= TX_DESC_CNT)) {
223 diff &= 0xffff;
224 if (diff == TX_DESC_CNT)
225 diff = MAX_TX_DESC_CNT;
226 }
Michael Chane89bbf12005-08-25 15:36:58 -0700227 return (bp->tx_ring_size - diff);
228}
229
Michael Chanb6016b72005-05-26 13:03:09 -0700230static u32
231bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
232{
233 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
235}
236
237static void
238bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
239{
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
242}
243
244static void
245bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246{
247 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800248 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249 int i;
250
251 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254 for (i = 0; i < 5; i++) {
255 u32 val;
256 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258 break;
259 udelay(5);
260 }
261 } else {
262 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263 REG_WR(bp, BNX2_CTX_DATA, val);
264 }
Michael Chanb6016b72005-05-26 13:03:09 -0700265}
266
267static int
268bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
269{
270 u32 val1;
271 int i, ret;
272
273 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
274 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
275 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
276
277 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
278 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
279
280 udelay(40);
281 }
282
283 val1 = (bp->phy_addr << 21) | (reg << 16) |
284 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
285 BNX2_EMAC_MDIO_COMM_START_BUSY;
286 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
287
288 for (i = 0; i < 50; i++) {
289 udelay(10);
290
291 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
292 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
293 udelay(5);
294
295 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
296 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
297
298 break;
299 }
300 }
301
302 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
303 *val = 0x0;
304 ret = -EBUSY;
305 }
306 else {
307 *val = val1;
308 ret = 0;
309 }
310
311 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
314
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317
318 udelay(40);
319 }
320
321 return ret;
322}
323
324static int
325bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
326{
327 u32 val1;
328 int i, ret;
329
330 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
331 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
332 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
333
334 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
335 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
336
337 udelay(40);
338 }
339
340 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
341 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
342 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
343 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400344
Michael Chanb6016b72005-05-26 13:03:09 -0700345 for (i = 0; i < 50; i++) {
346 udelay(10);
347
348 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
349 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
350 udelay(5);
351 break;
352 }
353 }
354
355 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
356 ret = -EBUSY;
357 else
358 ret = 0;
359
360 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
363
364 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
365 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366
367 udelay(40);
368 }
369
370 return ret;
371}
372
/* Mask device interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have taken effect on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask device interrupts and force an immediate coalesce pass.
 *
 * Two-step sequence: first ack the current status index with the mask
 * bit still set, then ack again with the mask cleared.  The final
 * COAL_NOW write makes the host coalescing block generate an interrupt
 * right away if any events arrived while interrupts were masked.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Mask interrupts and wait for any in-flight interrupt handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() knows a matching start
 * is required before the device is re-enabled.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
401
402static void
403bnx2_netif_stop(struct bnx2 *bp)
404{
405 bnx2_disable_int_sync(bp);
406 if (netif_running(bp->dev)) {
407 netif_poll_disable(bp->dev);
408 netif_tx_disable(bp->dev);
409 bp->dev->trans_start = jiffies; /* prevent tx timeout */
410 }
411}
412
413static void
414bnx2_netif_start(struct bnx2 *bp)
415{
416 if (atomic_dec_and_test(&bp->intr_sem)) {
417 if (netif_running(bp->dev)) {
418 netif_wake_queue(bp->dev);
419 netif_poll_enable(bp->dev);
420 bnx2_enable_int(bp);
421 }
422 }
423}
424
425static void
426bnx2_free_mem(struct bnx2 *bp)
427{
Michael Chan13daffa2006-03-20 17:49:20 -0800428 int i;
429
Michael Chan59b47d82006-11-19 14:10:45 -0800430 for (i = 0; i < bp->ctx_pages; i++) {
431 if (bp->ctx_blk[i]) {
432 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
433 bp->ctx_blk[i],
434 bp->ctx_blk_mapping[i]);
435 bp->ctx_blk[i] = NULL;
436 }
437 }
Michael Chanb6016b72005-05-26 13:03:09 -0700438 if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800439 pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700440 bp->status_blk, bp->status_blk_mapping);
441 bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800442 bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700443 }
444 if (bp->tx_desc_ring) {
445 pci_free_consistent(bp->pdev,
446 sizeof(struct tx_bd) * TX_DESC_CNT,
447 bp->tx_desc_ring, bp->tx_desc_mapping);
448 bp->tx_desc_ring = NULL;
449 }
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400450 kfree(bp->tx_buf_ring);
451 bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800452 for (i = 0; i < bp->rx_max_ring; i++) {
453 if (bp->rx_desc_ring[i])
454 pci_free_consistent(bp->pdev,
455 sizeof(struct rx_bd) * RX_DESC_CNT,
456 bp->rx_desc_ring[i],
457 bp->rx_desc_mapping[i]);
458 bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700459 }
Michael Chan13daffa2006-03-20 17:49:20 -0800460 vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400461 bp->rx_buf_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700462}
463
/* Allocate all host memory for the device: TX/RX rings (both the DMA
 * descriptor rings and the host-side shadow buffers), the combined
 * status+statistics block, and (on 5709) the on-chip context pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* RX shadow ring can be large (rx_max_ring pages of sw_bd), so it
	 * uses vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
	       bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	/* Statistics start at the first cache line after the status block. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB (0x2000) of context memory, carved into host pages;
		 * at least one page even if BCM_PAGE_SIZE > 8KB.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
534
535static void
Michael Chane3648b32005-11-04 08:51:21 -0800536bnx2_report_fw_link(struct bnx2 *bp)
537{
538 u32 fw_link_status = 0;
539
540 if (bp->link_up) {
541 u32 bmsr;
542
543 switch (bp->line_speed) {
544 case SPEED_10:
545 if (bp->duplex == DUPLEX_HALF)
546 fw_link_status = BNX2_LINK_STATUS_10HALF;
547 else
548 fw_link_status = BNX2_LINK_STATUS_10FULL;
549 break;
550 case SPEED_100:
551 if (bp->duplex == DUPLEX_HALF)
552 fw_link_status = BNX2_LINK_STATUS_100HALF;
553 else
554 fw_link_status = BNX2_LINK_STATUS_100FULL;
555 break;
556 case SPEED_1000:
557 if (bp->duplex == DUPLEX_HALF)
558 fw_link_status = BNX2_LINK_STATUS_1000HALF;
559 else
560 fw_link_status = BNX2_LINK_STATUS_1000FULL;
561 break;
562 case SPEED_2500:
563 if (bp->duplex == DUPLEX_HALF)
564 fw_link_status = BNX2_LINK_STATUS_2500HALF;
565 else
566 fw_link_status = BNX2_LINK_STATUS_2500FULL;
567 break;
568 }
569
570 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
571
572 if (bp->autoneg) {
573 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
574
Michael Chanca58c3a2007-05-03 13:22:52 -0700575 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
576 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chane3648b32005-11-04 08:51:21 -0800577
578 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
579 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
580 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
581 else
582 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
583 }
584 }
585 else
586 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
587
588 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
589}
590
/* Log the new link state to the console, update the carrier flag, and
 * forward the state to the bootcode.  The message is assembled from
 * several printk fragments, so their order must not change.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
625
/* Resolve bp->flow_ctrl (TX/RX pause enables) from the configured or
 * autonegotiated link parameters.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause is only auto-resolved when BOTH speed and flow control
	 * are autonegotiated; otherwise honor the requested setting
	 * (full duplex only) and bail out.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is undefined at half duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Remap the 1000Base-X pause bits onto the copper
	 * ADVERTISE_PAUSE_* encoding so the resolution table below can
	 * be shared by both PHY types.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
701
702static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800703bnx2_5708s_linkup(struct bnx2 *bp)
704{
705 u32 val;
706
707 bp->link_up = 1;
708 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710 case BCM5708S_1000X_STAT1_SPEED_10:
711 bp->line_speed = SPEED_10;
712 break;
713 case BCM5708S_1000X_STAT1_SPEED_100:
714 bp->line_speed = SPEED_100;
715 break;
716 case BCM5708S_1000X_STAT1_SPEED_1G:
717 bp->line_speed = SPEED_1000;
718 break;
719 case BCM5708S_1000X_STAT1_SPEED_2G5:
720 bp->line_speed = SPEED_2500;
721 break;
722 }
723 if (val & BCM5708S_1000X_STAT1_FD)
724 bp->duplex = DUPLEX_FULL;
725 else
726 bp->duplex = DUPLEX_HALF;
727
728 return 0;
729}
730
731static int
732bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700733{
734 u32 bmcr, local_adv, remote_adv, common;
735
736 bp->link_up = 1;
737 bp->line_speed = SPEED_1000;
738
Michael Chanca58c3a2007-05-03 13:22:52 -0700739 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700740 if (bmcr & BMCR_FULLDPLX) {
741 bp->duplex = DUPLEX_FULL;
742 }
743 else {
744 bp->duplex = DUPLEX_HALF;
745 }
746
747 if (!(bmcr & BMCR_ANENABLE)) {
748 return 0;
749 }
750
Michael Chanca58c3a2007-05-03 13:22:52 -0700751 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
752 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700753
754 common = local_adv & remote_adv;
755 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757 if (common & ADVERTISE_1000XFULL) {
758 bp->duplex = DUPLEX_FULL;
759 }
760 else {
761 bp->duplex = DUPLEX_HALF;
762 }
763 }
764
765 return 0;
766}
767
/* Decode the copper PHY link parameters into bp->line_speed/duplex.
 *
 * Autoneg case: pick the best common capability, trying 1000 (from the
 * CTRL1000/STAT1000 pair — note the remote abilities sit 2 bits higher,
 * hence the >> 2) before falling back to the 10/100 advertisement
 * registers; the ladder order (1000 > 100 > 10, full > half) encodes
 * the priority and must not be reordered.  Forced case: speed/duplex
 * come straight from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align remote 1000Base-T abilities with local ones. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
833
/* Program the MAC (EMAC) to match the resolved link parameters: port
 * mode, duplex, and RX/TX pause enables.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x26ff widens the inter-packet gap for 1G half duplex;
	 * 0x2620 is the normal setting.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode; the 5706 falls
				 * through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII with the 25G mode bit set. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
901static int
902bnx2_set_link(struct bnx2 *bp)
903{
904 u32 bmsr;
905 u8 link_up;
906
Michael Chan80be4432006-11-19 14:07:28 -0800907 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -0700908 bp->link_up = 1;
909 return 0;
910 }
911
912 link_up = bp->link_up;
913
Michael Chanca58c3a2007-05-03 13:22:52 -0700914 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
915 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -0700916
917 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
918 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
919 u32 val;
920
921 val = REG_RD(bp, BNX2_EMAC_STATUS);
922 if (val & BNX2_EMAC_STATUS_LINK)
923 bmsr |= BMSR_LSTATUS;
924 else
925 bmsr &= ~BMSR_LSTATUS;
926 }
927
928 if (bmsr & BMSR_LSTATUS) {
929 bp->link_up = 1;
930
931 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800932 if (CHIP_NUM(bp) == CHIP_NUM_5706)
933 bnx2_5706s_linkup(bp);
934 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
935 bnx2_5708s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700936 }
937 else {
938 bnx2_copper_linkup(bp);
939 }
940 bnx2_resolve_flow_ctrl(bp);
941 }
942 else {
943 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
944 (bp->autoneg & AUTONEG_SPEED)) {
945
946 u32 bmcr;
947
948 bnx2_read_phy(bp, MII_BMCR, &bmcr);
Michael Chan80be4432006-11-19 14:07:28 -0800949 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
Michael Chanb6016b72005-05-26 13:03:09 -0700950 if (!(bmcr & BMCR_ANENABLE)) {
951 bnx2_write_phy(bp, MII_BMCR, bmcr |
952 BMCR_ANENABLE);
953 }
954 }
955 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
956 bp->link_up = 0;
957 }
958
959 if (bp->link_up != link_up) {
960 bnx2_report_link(bp);
961 }
962
963 bnx2_set_mac_link(bp);
964
965 return 0;
966}
967
968static int
969bnx2_reset_phy(struct bnx2 *bp)
970{
971 int i;
972 u32 reg;
973
Michael Chanca58c3a2007-05-03 13:22:52 -0700974 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -0700975
976#define PHY_RESET_MAX_WAIT 100
977 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978 udelay(10);
979
Michael Chanca58c3a2007-05-03 13:22:52 -0700980 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -0700981 if (!(reg & BMCR_RESET)) {
982 udelay(20);
983 break;
984 }
985 }
986 if (i == PHY_RESET_MAX_WAIT) {
987 return -EBUSY;
988 }
989 return 0;
990}
991
992static u32
993bnx2_phy_get_pause_adv(struct bnx2 *bp)
994{
995 u32 adv = 0;
996
997 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPAUSE;
1002 }
1003 else {
1004 adv = ADVERTISE_PAUSE_CAP;
1005 }
1006 }
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPSE_ASYM;
1010 }
1011 else {
1012 adv = ADVERTISE_PAUSE_ASYM;
1013 }
1014 }
1015 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018 }
1019 else {
1020 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021 }
1022 }
1023 return adv;
1024}
1025
/* Configure the SerDes PHY for either a forced speed/duplex or
 * autonegotiation, depending on bp->autoneg.  Called with
 * bp->phy_lock held (the autoneg path drops and re-acquires it
 * around a sleep).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Forcing 2.5G also requires the UP1 2.5G enable
			 * bit; toggling it forces the link to retrain.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G on a 5708: clear the 2.5G bit. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees the drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiation path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
/* Advertisement masks: ETHTOOL_* use the ethtool ADVERTISED_* encoding,
 * PHY_ALL_* use the MII ADVERTISE_* register encoding.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Configure the copper PHY: either program the advertisement registers
 * and (re)start autonegotiation, or force speed/duplex via BMCR.
 * Called with bp->phy_lock held (the forced path drops and re-acquires
 * it around a sleep).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autonegotiation path: rebuild the 10/100 and 1000
		 * advertisement registers from bp->advertising.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only kick autoneg if something actually changed or
		 * autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link bit is latched-low; read twice for current
		 * status.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237static int
1238bnx2_setup_phy(struct bnx2 *bp)
1239{
1240 if (bp->loopback == MAC_LOOPBACK)
1241 return 0;
1242
1243 if (bp->phy_flags & PHY_SERDES_FLAG) {
1244 return (bnx2_setup_serdes_phy(bp));
1245 }
1246 else {
1247 return (bnx2_setup_copper_phy(bp));
1248 }
1249}
1250
/* One-time initialization of the 5708 SerDes PHY: select IEEE register
 * layout, enable fiber mode with auto-detect, enable 2.5G where the
 * board supports it, and apply board/revision-specific TX tuning from
 * shared-memory configuration.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use the IEEE-compatible register block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory, applied
	 * only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time initialization of the 5706 SerDes PHY, including MTU-
 * dependent extended-packet-length configuration via shadow registers
 * 0x18/0x1c.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time initialization of the copper PHY: apply the CRC workaround
 * and early-DAC disable where flagged, program MTU-dependent extended
 * packet length, and enable ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-documented workaround sequence via the PHY
		 * shadow/expansion registers (0x18/0x17/0x15).
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 of DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Top-level PHY initialization: set default MII register offsets,
 * enable link attention, reset the PHY, read its ID, run the
 * chip-specific init routine, and apply the current link settings.
 * Returns the chip-specific init routine's status.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default to the standard MII register map; chip-specific init
	 * may remap these offsets.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1427
1428static int
1429bnx2_set_mac_loopback(struct bnx2 *bp)
1430{
1431 u32 mac_mode;
1432
1433 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1434 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1435 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1436 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1437 bp->link_up = 1;
1438 return 0;
1439}
1440
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback (forced 1000/full) and program the MAC
 * port mode accordingly, used by the self-test paths.  Waits up to
 * ~1 second for the loopback link to come up.  Returns 0 on success
 * or the bnx2_write_phy() error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* The PHY write needs phy_lock; the wait loop below must not
	 * hold it because bnx2_test_link() sleeps.
	 */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback / forced-link bits and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1472
/* Post a message to the bootcode firmware via the shared-memory
 * mailbox and wait for the acknowledgement.  A sequence number is
 * appended so the ack can be matched to this request.  Returns 0 on
 * success (or immediately for WAIT0 messages), -EBUSY on ack timeout,
 * -EIO if the firmware reports a non-OK status.  @silent suppresses
 * the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes our sequence number when done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require an ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1515
/* Program the 5709 host-based context memory page table: write each
 * context page's DMA address into the page-table registers and poll
 * for the write-request bit to clear.  Returns 0 on success or -EBUSY
 * if a page-table write does not complete.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size into bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page address plus the valid bit,
		 * then the high 32 bits, then trigger the table write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll briefly for the hardware to latch the entry. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1549
/* Zero-initialize the on-chip context memory for all 96 connection
 * IDs (pre-5709 chips).  On 5706 A0 some context IDs are remapped to
 * work around a hardware erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* A0 erratum: remap CIDs with bit 3 set into the
			 * 0x60+ range.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context page, then clear it word by word. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1590
1591static int
1592bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1593{
1594 u16 *good_mbuf;
1595 u32 good_mbuf_cnt;
1596 u32 val;
1597
1598 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1599 if (good_mbuf == NULL) {
1600 printk(KERN_ERR PFX "Failed to allocate memory in "
1601 "bnx2_alloc_bad_rbuf\n");
1602 return -ENOMEM;
1603 }
1604
1605 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1606 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1607
1608 good_mbuf_cnt = 0;
1609
1610 /* Allocate a bunch of mbufs and save the good ones in an array. */
1611 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1612 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1613 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1614
1615 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1616
1617 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1618
1619 /* The addresses with Bit 9 set are bad memory blocks. */
1620 if (!(val & (1 << 9))) {
1621 good_mbuf[good_mbuf_cnt] = (u16) val;
1622 good_mbuf_cnt++;
1623 }
1624
1625 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1626 }
1627
1628 /* Free the good ones back to the mbuf pool thus discarding
1629 * all the bad ones. */
1630 while (good_mbuf_cnt) {
1631 good_mbuf_cnt--;
1632
1633 val = good_mbuf[good_mbuf_cnt];
1634 val = (val << 9) | val | 1;
1635
1636 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1637 }
1638 kfree(good_mbuf);
1639 return 0;
1640}
1641
1642static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001643bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001644{
1645 u32 val;
1646 u8 *mac_addr = bp->dev->dev_addr;
1647
1648 val = (mac_addr[0] << 8) | mac_addr[1];
1649
1650 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1651
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001652 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001653 (mac_addr[4] << 8) | mac_addr[5];
1654
1655 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1656}
1657
/* Allocate and DMA-map a new receive skb for ring slot @index, and
 * point the corresponding RX buffer descriptor at it.  Also advances
 * the producer byte-sequence counter.  Returns 0 on success or
 * -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to the hardware's RX alignment. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1688
/* Handle a link-state attention: compare the asserted and acked
 * attention bits in the status block, acknowledge the change via the
 * PCICFG set/clear command registers, and re-resolve the link.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		/* Ack the attention so bits and bits_ack agree again. */
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1710
/* Reclaim completed TX descriptors: walk from the software consumer
 * index to the hardware consumer index in the status block, unmap and
 * free each transmitted skb, then wake the TX queue if it was stopped
 * and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip over the last (unused) entry in a ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound;
			 * stop if the packet's last BD is not yet done.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page behind the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample the hardware index; more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake-up: re-test under netif_tx_lock to avoid
	 * racing with bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1798
/* Recycle an RX skb that will not be passed up the stack: hand the
 * consumer slot's skb (and, when cons != prod, its DMA mapping and
 * buffer descriptor address) to the producer slot, and advance the
 * producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header area back to the device after the CPU sync
	 * done by the RX path.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor already point at this skb. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1828
1829static int
1830bnx2_rx_int(struct bnx2 *bp, int budget)
1831{
Michael Chanf4e418f2005-11-04 08:53:48 -08001832 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001833 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1834 struct l2_fhdr *rx_hdr;
1835 int rx_pkt = 0;
1836
Michael Chanf4e418f2005-11-04 08:53:48 -08001837 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001838 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1839 hw_cons++;
1840 }
1841 sw_cons = bp->rx_cons;
1842 sw_prod = bp->rx_prod;
1843
1844 /* Memory barrier necessary as speculative reads of the rx
1845 * buffer can be ahead of the index in the status block
1846 */
1847 rmb();
1848 while (sw_cons != hw_cons) {
1849 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08001850 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07001851 struct sw_bd *rx_buf;
1852 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08001853 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07001854
1855 sw_ring_cons = RX_RING_IDX(sw_cons);
1856 sw_ring_prod = RX_RING_IDX(sw_prod);
1857
1858 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1859 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08001860
1861 rx_buf->skb = NULL;
1862
1863 dma_addr = pci_unmap_addr(rx_buf, mapping);
1864
1865 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07001866 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1867
1868 rx_hdr = (struct l2_fhdr *) skb->data;
1869 len = rx_hdr->l2_fhdr_pkt_len - 4;
1870
Michael Chanade2bfe2006-01-23 16:09:51 -08001871 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07001872 (L2_FHDR_ERRORS_BAD_CRC |
1873 L2_FHDR_ERRORS_PHY_DECODE |
1874 L2_FHDR_ERRORS_ALIGNMENT |
1875 L2_FHDR_ERRORS_TOO_SHORT |
1876 L2_FHDR_ERRORS_GIANT_FRAME)) {
1877
1878 goto reuse_rx;
1879 }
1880
1881 /* Since we don't have a jumbo ring, copy small packets
1882 * if mtu > 1500
1883 */
1884 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1885 struct sk_buff *new_skb;
1886
Michael Chan932f3772006-08-15 01:39:36 -07001887 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07001888 if (new_skb == NULL)
1889 goto reuse_rx;
1890
1891 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001892 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
1893 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07001894 skb_reserve(new_skb, 2);
1895 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07001896
1897 bnx2_reuse_rx_skb(bp, skb,
1898 sw_ring_cons, sw_ring_prod);
1899
1900 skb = new_skb;
1901 }
1902 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08001903 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07001904 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1905
1906 skb_reserve(skb, bp->rx_offset);
1907 skb_put(skb, len);
1908 }
1909 else {
1910reuse_rx:
1911 bnx2_reuse_rx_skb(bp, skb,
1912 sw_ring_cons, sw_ring_prod);
1913 goto next_rx;
1914 }
1915
1916 skb->protocol = eth_type_trans(skb, bp->dev);
1917
1918 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07001919 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001920
Michael Chan745720e2006-06-29 12:37:41 -07001921 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07001922 goto next_rx;
1923
1924 }
1925
Michael Chanb6016b72005-05-26 13:03:09 -07001926 skb->ip_summed = CHECKSUM_NONE;
1927 if (bp->rx_csum &&
1928 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1929 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1930
Michael Chanade2bfe2006-01-23 16:09:51 -08001931 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1932 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07001933 skb->ip_summed = CHECKSUM_UNNECESSARY;
1934 }
1935
1936#ifdef BCM_VLAN
1937 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1938 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1939 rx_hdr->l2_fhdr_vlan_tag);
1940 }
1941 else
1942#endif
1943 netif_receive_skb(skb);
1944
1945 bp->dev->last_rx = jiffies;
1946 rx_pkt++;
1947
1948next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07001949 sw_cons = NEXT_RX_BD(sw_cons);
1950 sw_prod = NEXT_RX_BD(sw_prod);
1951
1952 if ((rx_pkt == budget))
1953 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08001954
1955 /* Refresh hw_cons to see if there is new work */
1956 if (sw_cons == hw_cons) {
1957 hw_cons = bp->hw_rx_cons =
1958 sblk->status_rx_quick_consumer_index0;
1959 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1960 hw_cons++;
1961 rmb();
1962 }
Michael Chanb6016b72005-05-26 13:03:09 -07001963 }
1964 bp->rx_cons = sw_cons;
1965 bp->rx_prod = sw_prod;
1966
1967 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1968
1969 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1970
1971 mmiowb();
1972
1973 return rx_pkt;
1974
1975}
1976
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the status block cache line before the NAPI poll reads it. */
	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further interrupts until the NAPI
	 * poll routine re-enables them.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to bnx2_poll(). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1999
/* INTx ISR (possibly on a shared line).  Returns IRQ_NONE when the
 * interrupt was not raised by this device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* INTA deasserted: not our interrupt */

	/* Ack and mask further interrupts until NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2029
Michael Chanf4e418f2005-11-04 08:53:48 -08002030static inline int
2031bnx2_has_work(struct bnx2 *bp)
2032{
2033 struct status_block *sblk = bp->status_blk;
2034
2035 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2036 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2037 return 1;
2038
Michael Chandb8b2252007-03-28 14:17:36 -07002039 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2040 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
Michael Chanf4e418f2005-11-04 08:53:48 -08002041 return 1;
2042
2043 return 0;
2044}
2045
/* NAPI poll routine.  Handles link attentions, TX completions and up to
 * the allotted budget of RX packets.  Returns 0 (and re-enables chip
 * interrupts) when all work is done, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the raw and acked link-state attention bits
	 * means a link event is pending.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen before re-checking for
	 * work, so a new update after this point re-triggers an IRQ.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: a single ack re-enables interrupts. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack with interrupts still masked first, then
		 * unmask with a second write.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2107
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the chip's RX filtering: promiscuous / all-multicast /
 * multicast hash, VLAN tag stripping, and the RPM sort-mode registers.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered; with ASF management firmware the tags must be kept.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast: set all hash bits. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 bits: CRC32 low byte
		 * selects register (top 3 bits) and bit (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort block. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2182
Michael Chanfba9fe92006-06-12 22:21:25 -07002183#define FW_BUF_SIZE 0x8000
2184
2185static int
2186bnx2_gunzip_init(struct bnx2 *bp)
2187{
2188 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2189 goto gunzip_nomem1;
2190
2191 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2192 goto gunzip_nomem2;
2193
2194 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2195 if (bp->strm->workspace == NULL)
2196 goto gunzip_nomem3;
2197
2198 return 0;
2199
2200gunzip_nomem3:
2201 kfree(bp->strm);
2202 bp->strm = NULL;
2203
2204gunzip_nomem2:
2205 vfree(bp->gunzip_buf);
2206 bp->gunzip_buf = NULL;
2207
2208gunzip_nomem1:
2209 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2210 "uncompression.\n", bp->dev->name);
2211 return -ENOMEM;
2212}
2213
2214static void
2215bnx2_gunzip_end(struct bnx2 *bp)
2216{
2217 kfree(bp->strm->workspace);
2218
2219 kfree(bp->strm);
2220 bp->strm = NULL;
2221
2222 if (bp->gunzip_buf) {
2223 vfree(bp->gunzip_buf);
2224 bp->gunzip_buf = NULL;
2225 }
2226}
2227
/* Inflate a gzip-wrapped firmware image @zbuf of @len bytes into
 * bp->gunzip_buf.  On success *outbuf/*outlen describe the decompressed
 * data (valid until the next call).  Returns 0, -EINVAL on a bad gzip
 * header, or a negative zlib error code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes (magic, method, flags, mtime,
	 * xfl, os).
	 */
	n = 10;

#define FNAME	0x8
	/* Skip the NUL-terminated original-file-name field if present.
	 * NOTE(review): only FNAME is handled; FEXTRA/FCOMMENT/FHCRC
	 * flags would break this parse — presumably the in-tree firmware
	 * images never set them.  Confirm against the firmware generator.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* Raw deflate data starts at offset n. */
	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate, no zlib wrapper. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2268
/* Load an RV2P (RX processor) microcode image.  @rv2p_code is a
 * sequence of 64-bit instructions stored as pairs of 32-bit words;
 * @rv2p_code_len is the byte length.  @rv2p_proc selects RV2P_PROC1 or
 * RV2P_PROC2.  The processor is left in reset; un-stall happens later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each instruction is 8 bytes: high word then low word. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at index i/8 into the selected
		 * processor's instruction memory.
		 */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2301
Michael Chanaf3ee512006-11-19 14:09:25 -08002302static int
Michael Chanb6016b72005-05-26 13:03:09 -07002303load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2304{
2305 u32 offset;
2306 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002307 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002308
2309 /* Halt the CPU. */
2310 val = REG_RD_IND(bp, cpu_reg->mode);
2311 val |= cpu_reg->mode_value_halt;
2312 REG_WR_IND(bp, cpu_reg->mode, val);
2313 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2314
2315 /* Load the Text area. */
2316 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002317 if (fw->gz_text) {
2318 u32 text_len;
2319 void *text;
2320
2321 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2322 &text_len);
2323 if (rc)
2324 return rc;
2325
2326 fw->text = text;
2327 }
2328 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002329 int j;
2330
2331 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002332 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002333 }
2334 }
2335
2336 /* Load the Data area. */
2337 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2338 if (fw->data) {
2339 int j;
2340
2341 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2342 REG_WR_IND(bp, offset, fw->data[j]);
2343 }
2344 }
2345
2346 /* Load the SBSS area. */
2347 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2348 if (fw->sbss) {
2349 int j;
2350
2351 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2352 REG_WR_IND(bp, offset, fw->sbss[j]);
2353 }
2354 }
2355
2356 /* Load the BSS area. */
2357 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2358 if (fw->bss) {
2359 int j;
2360
2361 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2362 REG_WR_IND(bp, offset, fw->bss[j]);
2363 }
2364 }
2365
2366 /* Load the Read-Only area. */
2367 offset = cpu_reg->spad_base +
2368 (fw->rodata_addr - cpu_reg->mips_view_base);
2369 if (fw->rodata) {
2370 int j;
2371
2372 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2373 REG_WR_IND(bp, offset, fw->rodata[j]);
2374 }
2375 }
2376
2377 /* Clear the pre-fetch instruction. */
2378 REG_WR_IND(bp, cpu_reg->inst, 0);
2379 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2380
2381 /* Start the CPU. */
2382 val = REG_RD_IND(bp, cpu_reg->mode);
2383 val &= ~cpu_reg->mode_value_halt;
2384 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2385 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002386
2387 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002388}
2389
/* Load firmware into every on-chip processor: both RV2P processors,
 * then the RX, TX, TX patch-up, completion and (5709 only) command
 * CPUs.  Chooses the 5709 ("_09") or 5706/5708 ("_06") image set based
 * on the chip number.  Returns 0 on success or a decompression error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the shared decompression buffers first. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	/* Always release the decompression buffers. */
	bnx2_gunzip_end(bp);
	return rc;
}
2534
/* Transition the device between PCI power states.
 *
 * PCI_D0: bring the chip out of low power (with the required delay when
 * leaving D3hot) and disable the magic/ACPI packet logic.
 * PCI_D3hot: optionally configure Wake-on-LAN (force copper autoneg at
 * 10/100, enable magic/ACPI packet reception and the needed RX blocks),
 * tell the firmware, then write the PM control register.
 *
 * Returns 0, or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and disable the
		 * magic-packet match used for WoL.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for WoL link,
			 * then restore the configured settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell management firmware we are suspending (unless WoL
		 * is entirely unsupported on this board).
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WoL is on.
			 * NOTE(review): presumably a chip erratum —
			 * confirm against the 5706 errata sheet.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2661
2662static int
2663bnx2_acquire_nvram_lock(struct bnx2 *bp)
2664{
2665 u32 val;
2666 int j;
2667
2668 /* Request access to the flash interface. */
2669 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2670 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2671 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2672 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2673 break;
2674
2675 udelay(5);
2676 }
2677
2678 if (j >= NVRAM_TIMEOUT_COUNT)
2679 return -EBUSY;
2680
2681 return 0;
2682}
2683
2684static int
2685bnx2_release_nvram_lock(struct bnx2 *bp)
2686{
2687 int j;
2688 u32 val;
2689
2690 /* Relinquish nvram interface. */
2691 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2692
2693 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2694 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2695 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2696 break;
2697
2698 udelay(5);
2699 }
2700
2701 if (j >= NVRAM_TIMEOUT_COUNT)
2702 return -EBUSY;
2703
2704 return 0;
2705}
2706
2707
/* Enable writes to the flash.  Sets the PCI write-enable in MISC_CFG
 * and, for non-buffered flash parts, issues a WREN command and waits for
 * it to complete.  Returns 0 or -EBUSY on command timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the WREN command to the part. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2736
2737static void
2738bnx2_disable_nvram_write(struct bnx2 *bp)
2739{
2740 u32 val;
2741
2742 val = REG_RD(bp, BNX2_MISC_CFG);
2743 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2744}
2745
2746
2747static void
2748bnx2_enable_nvram_access(struct bnx2 *bp)
2749{
2750 u32 val;
2751
2752 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2753 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002754 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002755 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2756}
2757
2758static void
2759bnx2_disable_nvram_access(struct bnx2 *bp)
2760{
2761 u32 val;
2762
2763 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2764 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002765 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002766 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2767 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2768}
2769
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts (which erase internally on write).  Returns 0 on success or
 * -EBUSY if the erase command does not complete in time.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2809
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian/flash byte order).  @cmd_flags carries FIRST/LAST framing
 * bits for multi-word transfers.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by (page << page_bits) + byte. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Preserve flash byte order in the caller's buffer. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2855
2856
/* Write one 32-bit word (@val, 4 bytes) of NVRAM at @offset.
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST to bracket a burst.
 *
 * Caller must already hold the NVRAM lock, have access enabled, and
 * have called bnx2_enable_nvram_write().  Returns 0 on success,
 * -EBUSY on poll timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
        u32 cmd, val32;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

        /* Calculate an offset of a buffered flash. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Data is written to the flash in big-endian byte order. */
        memcpy(&val32, val, 4);
        val32 = cpu_to_be32(val32);

        /* Write the data. */
        REG_WR(bp, BNX2_NVM_WRITE, val32);

        /* Address of the NVRAM to write to. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue the write command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                udelay(5);

                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
                        break;
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
2900
/* Probe the attached flash/EEPROM part and record it in bp->flash_info.
 *
 * BNX2_NVM_CFG1 bit 30 indicates the flash interface was already
 * reconfigured (e.g. by the boot code); in that case the part is matched
 * against flash_table by its config1 strapping.  Otherwise the part is
 * matched by its hardware strapping (backup straps if bit 23 is set) and
 * the interface is reprogrammed from the matching table entry.
 *
 * Also determines bp->flash_size, preferring the size advertised in
 * shared-memory config over the table's total_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
2978
2979static int
2980bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2981 int buf_size)
2982{
2983 int rc = 0;
2984 u32 cmd_flags, offset32, len32, extra;
2985
2986 if (buf_size == 0)
2987 return 0;
2988
2989 /* Request access to the flash interface. */
2990 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2991 return rc;
2992
2993 /* Enable access to flash interface */
2994 bnx2_enable_nvram_access(bp);
2995
2996 len32 = buf_size;
2997 offset32 = offset;
2998 extra = 0;
2999
3000 cmd_flags = 0;
3001
3002 if (offset32 & 3) {
3003 u8 buf[4];
3004 u32 pre_len;
3005
3006 offset32 &= ~3;
3007 pre_len = 4 - (offset & 3);
3008
3009 if (pre_len >= len32) {
3010 pre_len = len32;
3011 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3012 BNX2_NVM_COMMAND_LAST;
3013 }
3014 else {
3015 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3016 }
3017
3018 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3019
3020 if (rc)
3021 return rc;
3022
3023 memcpy(ret_buf, buf + (offset & 3), pre_len);
3024
3025 offset32 += 4;
3026 ret_buf += pre_len;
3027 len32 -= pre_len;
3028 }
3029 if (len32 & 3) {
3030 extra = 4 - (len32 & 3);
3031 len32 = (len32 + 4) & ~3;
3032 }
3033
3034 if (len32 == 4) {
3035 u8 buf[4];
3036
3037 if (cmd_flags)
3038 cmd_flags = BNX2_NVM_COMMAND_LAST;
3039 else
3040 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3041 BNX2_NVM_COMMAND_LAST;
3042
3043 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3044
3045 memcpy(ret_buf, buf, 4 - extra);
3046 }
3047 else if (len32 > 0) {
3048 u8 buf[4];
3049
3050 /* Read the first word. */
3051 if (cmd_flags)
3052 cmd_flags = 0;
3053 else
3054 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3055
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3057
3058 /* Advance to the next dword. */
3059 offset32 += 4;
3060 ret_buf += 4;
3061 len32 -= 4;
3062
3063 while (len32 > 4 && rc == 0) {
3064 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3065
3066 /* Advance to the next dword. */
3067 offset32 += 4;
3068 ret_buf += 4;
3069 len32 -= 4;
3070 }
3071
3072 if (rc)
3073 return rc;
3074
3075 cmd_flags = BNX2_NVM_COMMAND_LAST;
3076 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3077
3078 memcpy(ret_buf, buf, 4 - extra);
3079 }
3080
3081 /* Disable access to flash interface */
3082 bnx2_disable_nvram_access(bp);
3083
3084 bnx2_release_nvram_lock(bp);
3085
3086 return rc;
3087}
3088
/* Write @buf_size bytes from @data_buf to NVRAM starting at @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are read first ("start"/"end") and merged with the
 * caller's data into a dword-aligned scratch buffer ("align_buf").
 *
 * The write then proceeds page by page.  For non-buffered flash each
 * iteration reads the whole page into "flash_buffer", erases the page,
 * and rewrites: preserved bytes before the new data, the new data, then
 * preserved bytes after it.  Buffered flash skips the read-back/erase.
 * The NVRAM lock is acquired and released once per page.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the first
 * error from the underlying lock/erase/read/write operations.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        if ((align_start = (offset32 & 3))) {
                /* Leading partial dword: round down and fetch the bytes
                 * that must be preserved ahead of the new data. */
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        if (len32 & 3) {
                /* Trailing partial dword: fetch the bytes to preserve
                 * after the new data. */
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        if (bp->flash_info->buffered == 0) {
                /* Page read-back buffer for non-buffered flash; 264 bytes
                 * covers the largest supported page.  TODO confirm against
                 * flash_table page sizes. */
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (bp->flash_info->buffered == 0) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (bp->flash_info->buffered == 0) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->buffered) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (bp->flash_info->buffered == 0) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        /* kfree(NULL) is a no-op, so both buffers can be freed
         * unconditionally. */
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
3268
/* Soft-reset the chip core after coordinating with the bootcode.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason OR'ed into the firmware
 * sync messages.  Sequence: quiesce DMA/coalescing, WAIT0 handshake with
 * firmware, deposit the driver reset signature in shared memory, reset
 * the core (5709 uses MISC_COMMAND; older chips use PCICFG_MISC_CONFIG
 * and poll for completion), verify endian swap config, then WAIT1
 * handshake for firmware init.  5706 A0 additionally gets a voltage
 * regulator tweak and bad-rbuf cleanup.
 *
 * Returns 0 on success, -EBUSY if the core reset does not complete,
 * -ENODEV on endian misconfiguration, or a bnx2_fw_sync() error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
                   BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* NOTE(review): open-coded ~20ms uninterruptible sleep;
                 * msleep(20) is the modern equivalent — confirm before
                 * changing. */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
                        current->state = TASK_UNINTERRUPTIBLE;
                        schedule_timeout(HZ / 50);
                }

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
        if (rc)
                return rc;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower. The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        return rc;
}
3360
/* Program the chip after a reset: DMA config, context memory, internal
 * CPU firmware, NVRAM probe, MAC address, MQ/RV2P/TBDR setup, MTU, and
 * all host-coalescing parameters from bp.  Ends with the WAIT2/RESET
 * firmware handshake and enabling the remaining blocks.
 *
 * Returns 0 on success or an error from bnx2_init_cpus()/bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        /* Extra DMA config bit for 133 MHz PCI-X.  NOTE(review): bits
         * 11/20/23 are undocumented here — meaning per Broadcom spec. */
        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                /* Disable PCI-X relaxed ordering. */
                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_5709_context(bp);
        else
                bnx2_init_context(bp);

        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the transmit backoff from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Host coalescing: DMA addresses of status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Coalescing trip points and tick values; high 16 bits are the
         * interrupt-context values. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
        else {
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
                       BNX2_HC_CONFIG_TX_TMR_MODE |
                       BNX2_HC_CONFIG_COLLECT_STATS);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

        /* Remember whether the bootcode has ASF management enabled. */
        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        /* Enable the remaining blocks. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
3528
/* Program the L2 TX context for connection @cid: context type, command
 * type, and the high/low halves of the TX descriptor ring's DMA address.
 * The 5709 (Xinan) uses a different set of context offsets than the
 * 5706/5708; the values written are identical.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, offset0, offset1, offset2, offset3;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                offset0 = BNX2_L2CTX_TYPE_XI;
                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
        } else {
                offset0 = BNX2_L2CTX_TYPE;
                offset1 = BNX2_L2CTX_CMD_TYPE;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
        }
        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
        CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
        CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

        val = (u64) bp->tx_desc_mapping >> 32;
        CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

        val = (u64) bp->tx_desc_mapping & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
Michael Chanb6016b72005-05-26 13:03:09 -07003557
/* Reset the software TX ring state and program the TX context.
 *
 * The last descriptor in the ring is made a chain pointer back to the
 * start of the ring.  Also caches the mailbox doorbell addresses for the
 * index and byte-sequence registers in bp.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 cid;

        /* Wake the queue once half the ring has drained. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        /* Chain the last BD back to the start of the ring. */
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        cid = TX_CID;
        bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
        bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

        bnx2_init_tx_context(bp, cid);
}
3582
/* Reset RX ring state, chain the RX descriptor pages into a circular
 * list, program the RX context with the first page's DMA address, fill
 * the ring with receive skbs, and ring the producer doorbells.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* The last BD of each page points to the next page; the
                 * final page wraps back to page 0. */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Fill the ring; stops early (best effort) if allocation fails. */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3642
3643static void
Michael Chan13daffa2006-03-20 17:49:20 -08003644bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3645{
3646 u32 num_rings, max;
3647
3648 bp->rx_ring_size = size;
3649 num_rings = 1;
3650 while (size > MAX_RX_DESC_CNT) {
3651 size -= MAX_RX_DESC_CNT;
3652 num_rings++;
3653 }
3654 /* round to next power of 2 */
3655 max = MAX_RX_RINGS;
3656 while ((max & num_rings) == 0)
3657 max >>= 1;
3658
3659 if (num_rings != max)
3660 max <<= 1;
3661
3662 bp->rx_max_ring = max;
3663 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3664}
3665
/* Unmap and free every skb still attached to the TX ring.
 *
 * Each packet occupies one head descriptor (mapped with
 * pci_unmap_single) followed by one descriptor per page fragment
 * (unmapped with pci_unmap_page); the index skips over all of them
 * together.  Safe to call before the ring is allocated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                i += j + 1;
        }

}
3702
/* Unmap and free every receive skb still posted on the RX ring.
 * Safe to call before the ring is allocated.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->rx_buf_ring == NULL)
                return;

        for (i = 0; i < bp->rx_max_ring_idx; i++) {
                struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL)
                        continue;

                pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                        bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                rx_buf->skb = NULL;

                dev_kfree_skb(skb);
        }
}
3726
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
3733
/* Reset the chip with @reset_code and bring the data path back up:
 * free all queued skbs (even if the chip reset failed), re-program the
 * chip, and re-initialize the TX and RX rings.
 * Returns 0 on success or the first error from reset/init.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
        int rc;

        rc = bnx2_reset_chip(bp, reset_code);
        /* Free skbs even on reset failure so buffers are not leaked. */
        bnx2_free_skbs(bp);
        if (rc)
                return rc;

        if ((rc = bnx2_init_chip(bp)) != 0)
                return rc;

        bnx2_init_tx_ring(bp);
        bnx2_init_rx_ring(bp);
        return 0;
}
3751
/* Full NIC bring-up: reset the data path, initialize the PHY (under
 * phy_lock, as bnx2_init_phy touches MDIO state), and establish link.
 * Returns 0 on success or the bnx2_reset_nic() error.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
        int rc;

        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;

        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
        spin_unlock_bh(&bp->phy_lock);
        bnx2_set_link(bp);
        return 0;
}
3766
/* Self-test: verify that device registers behave as declared.
 *
 * For each entry in reg_tbl, the current value is saved, the register
 * is written with all-zeros and then all-ones, and the read-back is
 * checked: bits in rw_mask must follow the written value, bits in
 * ro_mask must keep their saved value.  The original contents are
 * restored whether or not the test passes.
 *
 * Entries flagged BNX2_FL_NOT_5709 exist only on 5706/5708 parts and
 * are skipped on the 5709.
 *
 * Returns 0 on success, -ENODEV on the first mismatching register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset:  register offset from the start of BAR0 (regview).
	 * flags:   BNX2_FL_NOT_5709 marks registers absent on the 5709.
	 * rw_mask: bits expected to be read/writable.
	 * ro_mask: bits expected to be read-only.
	 */
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original contents so the register can be
		 * restored no matter how the test ends.
		 */
		save_val = readl(bp->regview + offset);

		/* Write all-zeros: r/w bits must read back 0, r/o bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: r/w bits must read back 1, r/o bits
		 * must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the saved contents before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3937
3938static int
3939bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3940{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003941 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003942 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3943 int i;
3944
3945 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3946 u32 offset;
3947
3948 for (offset = 0; offset < size; offset += 4) {
3949
3950 REG_WR_IND(bp, start + offset, test_pattern[i]);
3951
3952 if (REG_RD_IND(bp, start + offset) !=
3953 test_pattern[i]) {
3954 return -ENODEV;
3955 }
3956 }
3957 }
3958 return 0;
3959}
3960
3961static int
3962bnx2_test_memory(struct bnx2 *bp)
3963{
3964 int ret = 0;
3965 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07003966 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07003967 u32 offset;
3968 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07003969 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07003970 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003971 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003972 { 0xe0000, 0x4000 },
3973 { 0x120000, 0x4000 },
3974 { 0x1a0000, 0x4000 },
3975 { 0x160000, 0x4000 },
3976 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07003977 },
3978 mem_tbl_5709[] = {
3979 { 0x60000, 0x4000 },
3980 { 0xa0000, 0x3000 },
3981 { 0xe0000, 0x4000 },
3982 { 0x120000, 0x4000 },
3983 { 0x1a0000, 0x4000 },
3984 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07003985 };
Michael Chan5bae30c2007-05-03 13:18:46 -07003986 struct mem_entry *mem_tbl;
3987
3988 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3989 mem_tbl = mem_tbl_5709;
3990 else
3991 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07003992
3993 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3994 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3995 mem_tbl[i].len)) != 0) {
3996 return ret;
3997 }
3998 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003999
Michael Chanb6016b72005-05-26 13:03:09 -07004000 return ret;
4001}
4002
Michael Chanbc5a0692006-01-23 16:13:22 -08004003#define BNX2_MAC_LOOPBACK 0
4004#define BNX2_PHY_LOOPBACK 1
4005
Michael Chanb6016b72005-05-26 13:03:09 -07004006static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004007bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004008{
4009 unsigned int pkt_size, num_pkts, i;
4010 struct sk_buff *skb, *rx_skb;
4011 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004012 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004013 dma_addr_t map;
4014 struct tx_bd *txbd;
4015 struct sw_bd *rx_buf;
4016 struct l2_fhdr *rx_hdr;
4017 int ret = -ENODEV;
4018
Michael Chanbc5a0692006-01-23 16:13:22 -08004019 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4020 bp->loopback = MAC_LOOPBACK;
4021 bnx2_set_mac_loopback(bp);
4022 }
4023 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004024 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004025 bnx2_set_phy_loopback(bp);
4026 }
4027 else
4028 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004029
4030 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004031 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004032 if (!skb)
4033 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004034 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004035 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004036 memset(packet + 6, 0x0, 8);
4037 for (i = 14; i < pkt_size; i++)
4038 packet[i] = (unsigned char) (i & 0xff);
4039
4040 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4041 PCI_DMA_TODEVICE);
4042
Michael Chanbf5295b2006-03-23 01:11:56 -08004043 REG_WR(bp, BNX2_HC_COMMAND,
4044 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4045
Michael Chanb6016b72005-05-26 13:03:09 -07004046 REG_RD(bp, BNX2_HC_COMMAND);
4047
4048 udelay(5);
4049 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4050
Michael Chanb6016b72005-05-26 13:03:09 -07004051 num_pkts = 0;
4052
Michael Chanbc5a0692006-01-23 16:13:22 -08004053 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004054
4055 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4056 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4057 txbd->tx_bd_mss_nbytes = pkt_size;
4058 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4059
4060 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004061 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4062 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004063
Michael Chan234754d2006-11-19 14:11:41 -08004064 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4065 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004066
4067 udelay(100);
4068
Michael Chanbf5295b2006-03-23 01:11:56 -08004069 REG_WR(bp, BNX2_HC_COMMAND,
4070 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4071
Michael Chanb6016b72005-05-26 13:03:09 -07004072 REG_RD(bp, BNX2_HC_COMMAND);
4073
4074 udelay(5);
4075
4076 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004077 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004078
Michael Chanbc5a0692006-01-23 16:13:22 -08004079 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004080 goto loopback_test_done;
4081 }
4082
4083 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4084 if (rx_idx != rx_start_idx + num_pkts) {
4085 goto loopback_test_done;
4086 }
4087
4088 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4089 rx_skb = rx_buf->skb;
4090
4091 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4092 skb_reserve(rx_skb, bp->rx_offset);
4093
4094 pci_dma_sync_single_for_cpu(bp->pdev,
4095 pci_unmap_addr(rx_buf, mapping),
4096 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4097
Michael Chanade2bfe2006-01-23 16:09:51 -08004098 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004099 (L2_FHDR_ERRORS_BAD_CRC |
4100 L2_FHDR_ERRORS_PHY_DECODE |
4101 L2_FHDR_ERRORS_ALIGNMENT |
4102 L2_FHDR_ERRORS_TOO_SHORT |
4103 L2_FHDR_ERRORS_GIANT_FRAME)) {
4104
4105 goto loopback_test_done;
4106 }
4107
4108 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4109 goto loopback_test_done;
4110 }
4111
4112 for (i = 14; i < pkt_size; i++) {
4113 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4114 goto loopback_test_done;
4115 }
4116 }
4117
4118 ret = 0;
4119
4120loopback_test_done:
4121 bp->loopback = 0;
4122 return ret;
4123}
4124
Michael Chanbc5a0692006-01-23 16:13:22 -08004125#define BNX2_MAC_LOOPBACK_FAILED 1
4126#define BNX2_PHY_LOOPBACK_FAILED 2
4127#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4128 BNX2_PHY_LOOPBACK_FAILED)
4129
4130static int
4131bnx2_test_loopback(struct bnx2 *bp)
4132{
4133 int rc = 0;
4134
4135 if (!netif_running(bp->dev))
4136 return BNX2_LOOPBACK_FAILED;
4137
4138 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4139 spin_lock_bh(&bp->phy_lock);
4140 bnx2_init_phy(bp);
4141 spin_unlock_bh(&bp->phy_lock);
4142 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4143 rc |= BNX2_MAC_LOOPBACK_FAILED;
4144 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4145 rc |= BNX2_PHY_LOOPBACK_FAILED;
4146 return rc;
4147}
4148
Michael Chanb6016b72005-05-26 13:03:09 -07004149#define NVRAM_SIZE 0x200
4150#define CRC32_RESIDUAL 0xdebb20e3
4151
4152static int
4153bnx2_test_nvram(struct bnx2 *bp)
4154{
4155 u32 buf[NVRAM_SIZE / 4];
4156 u8 *data = (u8 *) buf;
4157 int rc = 0;
4158 u32 magic, csum;
4159
4160 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4161 goto test_nvram_done;
4162
4163 magic = be32_to_cpu(buf[0]);
4164 if (magic != 0x669955aa) {
4165 rc = -ENODEV;
4166 goto test_nvram_done;
4167 }
4168
4169 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4170 goto test_nvram_done;
4171
4172 csum = ether_crc_le(0x100, data);
4173 if (csum != CRC32_RESIDUAL) {
4174 rc = -ENODEV;
4175 goto test_nvram_done;
4176 }
4177
4178 csum = ether_crc_le(0x100, data + 0x100);
4179 if (csum != CRC32_RESIDUAL) {
4180 rc = -ENODEV;
4181 }
4182
4183test_nvram_done:
4184 return rc;
4185}
4186
4187static int
4188bnx2_test_link(struct bnx2 *bp)
4189{
4190 u32 bmsr;
4191
Michael Chanc770a652005-08-25 15:38:39 -07004192 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07004193 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
4194 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004195 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004196
Michael Chanb6016b72005-05-26 13:03:09 -07004197 if (bmsr & BMSR_LSTATUS) {
4198 return 0;
4199 }
4200 return -ENODEV;
4201}
4202
4203static int
4204bnx2_test_intr(struct bnx2 *bp)
4205{
4206 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004207 u16 status_idx;
4208
4209 if (!netif_running(bp->dev))
4210 return -ENODEV;
4211
4212 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4213
4214 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004215 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004216 REG_RD(bp, BNX2_HC_COMMAND);
4217
4218 for (i = 0; i < 10; i++) {
4219 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4220 status_idx) {
4221
4222 break;
4223 }
4224
4225 msleep_interruptible(10);
4226 }
4227 if (i < 10)
4228 return 0;
4229
4230 return -ENODEV;
4231}
4232
/* Periodic link state machine for the 5706 SerDes PHY (called from
 * bnx2_timer).
 *
 * Implements a software parallel-detect fallback: if autonegotiation
 * has not brought the link up but the PHY reports signal detect while
 * no config words arrive from the partner, the partner is presumed to
 * be a non-autoneg device and 1000/full is forced.  Once config words
 * reappear while parallel detect is active, autonegotiation is
 * re-enabled.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Sit out the ticks requested by an earlier restart. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* 0x1c/0x17/0x15 are PHY-specific registers; the
			 * double read of 0x15 appears intentional --
			 * NOTE(review): presumably a latched value, confirm
			 * against the PHY documentation.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but no autoneg partner:
				 * force 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up via parallel detect; if the partner now
		 * sends config words, fall back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4287
4288static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004289bnx2_5708_serdes_timer(struct bnx2 *bp)
4290{
4291 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4292 bp->serdes_an_pending = 0;
4293 return;
4294 }
4295
4296 spin_lock(&bp->phy_lock);
4297 if (bp->serdes_an_pending)
4298 bp->serdes_an_pending--;
4299 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4300 u32 bmcr;
4301
Michael Chanca58c3a2007-05-03 13:22:52 -07004302 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004303
4304 if (bmcr & BMCR_ANENABLE) {
4305 bmcr &= ~BMCR_ANENABLE;
4306 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4307 bnx2_write_phy(bp, MII_BMCR, bmcr);
4308 bp->current_interval = SERDES_FORCED_TIMEOUT;
4309 } else {
4310 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4311 bmcr |= BMCR_ANENABLE;
4312 bnx2_write_phy(bp, MII_BMCR, bmcr);
4313 bp->serdes_an_pending = 2;
4314 bp->current_interval = bp->timer_interval;
4315 }
4316
4317 } else
4318 bp->current_interval = bp->timer_interval;
4319
4320 spin_unlock(&bp->phy_lock);
4321}
4322
/* Driver heartbeat timer; re-arms itself every bp->current_interval
 * jiffies while the interface is up.
 *
 * Writes an incrementing pulse sequence number to the firmware
 * mailbox, samples the firmware RX drop counter into the stats block,
 * and runs the SerDes link state machine on 5706/5708 parts.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the body while interrupts are logically disabled
	 * (intr_sem held); try again on the next tick.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Keep-alive pulse for the bootcode/firmware. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4350
/* Called with rtnl_lock.
 *
 * net_device open hook: power the device up, allocate ring/status
 * memory, request the interrupt (preferring MSI where the chip
 * supports it), initialize the NIC, and verify that MSI actually
 * delivers interrupts -- falling back to INTx if it does not.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on each failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is avoided on 5706 A0/A1 silicon and when the user passed
	 * the disable_msi module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind: IRQ, MSI state, ring skbs, then memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize the NIC for INTx operation. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
/* Work-queue handler that recovers from TX timeouts (scheduled from
 * bnx2_tx_timeout): quiesce the interface, re-initialize the whole
 * NIC, and restart it.
 *
 * in_reset_task is polled by bnx2_close() so the device is not torn
 * down while this handler is still running.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is raised to 1 before the restart,
	 * presumably so the enable path in bnx2_netif_start() brings it
	 * back to 0 -- confirm against bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4466
/* net_device watchdog hook: the TX path appears hung.  Defer the
 * heavy chip reset to process context via the reset_task work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4475
4476#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: install (or clear, with vlgrp == NULL) the
 * VLAN group.  The interface is quiesced while the pointer changes,
 * then the RX filters are reprogrammed to match.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4490
/* Called with rtnl_lock */
/* VLAN acceleration hook: remove one VLAN id.  The group entry is
 * cleared under a quiesced interface and the RX mode is refreshed.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4503#endif
4504
Herbert Xu932ff272006-06-09 12:20:56 -07004505/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004506 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4507 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004508 */
4509static int
4510bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4511{
Michael Chan972ec0d2006-01-23 16:12:43 -08004512 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004513 dma_addr_t mapping;
4514 struct tx_bd *txbd;
4515 struct sw_bd *tx_buf;
4516 u32 len, vlan_tag_flags, last_frag, mss;
4517 u16 prod, ring_prod;
4518 int i;
4519
Michael Chane89bbf12005-08-25 15:36:58 -07004520 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004521 netif_stop_queue(dev);
4522 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4523 dev->name);
4524
4525 return NETDEV_TX_BUSY;
4526 }
4527 len = skb_headlen(skb);
4528 prod = bp->tx_prod;
4529 ring_prod = TX_RING_IDX(prod);
4530
4531 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004532 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004533 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4534 }
4535
4536 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4537 vlan_tag_flags |=
4538 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4539 }
Herbert Xu79671682006-06-22 02:40:14 -07004540 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004541 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4542 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004543 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004544
Michael Chanb6016b72005-05-26 13:03:09 -07004545 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4546
Michael Chan4666f872007-05-03 13:22:28 -07004547 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004548
Michael Chan4666f872007-05-03 13:22:28 -07004549 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4550 u32 tcp_off = skb_transport_offset(skb) -
4551 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004552
Michael Chan4666f872007-05-03 13:22:28 -07004553 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4554 TX_BD_FLAGS_SW_FLAGS;
4555 if (likely(tcp_off == 0))
4556 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4557 else {
4558 tcp_off >>= 3;
4559 vlan_tag_flags |= ((tcp_off & 0x3) <<
4560 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4561 ((tcp_off & 0x10) <<
4562 TX_BD_FLAGS_TCP6_OFF4_SHL);
4563 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4564 }
4565 } else {
4566 if (skb_header_cloned(skb) &&
4567 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4568 dev_kfree_skb(skb);
4569 return NETDEV_TX_OK;
4570 }
4571
4572 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4573
4574 iph = ip_hdr(skb);
4575 iph->check = 0;
4576 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4577 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4578 iph->daddr, 0,
4579 IPPROTO_TCP,
4580 0);
4581 if (tcp_opt_len || (iph->ihl > 5)) {
4582 vlan_tag_flags |= ((iph->ihl - 5) +
4583 (tcp_opt_len >> 2)) << 8;
4584 }
Michael Chanb6016b72005-05-26 13:03:09 -07004585 }
Michael Chan4666f872007-05-03 13:22:28 -07004586 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004587 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004588
4589 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004590
Michael Chanb6016b72005-05-26 13:03:09 -07004591 tx_buf = &bp->tx_buf_ring[ring_prod];
4592 tx_buf->skb = skb;
4593 pci_unmap_addr_set(tx_buf, mapping, mapping);
4594
4595 txbd = &bp->tx_desc_ring[ring_prod];
4596
4597 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4598 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4599 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4600 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4601
4602 last_frag = skb_shinfo(skb)->nr_frags;
4603
4604 for (i = 0; i < last_frag; i++) {
4605 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4606
4607 prod = NEXT_TX_BD(prod);
4608 ring_prod = TX_RING_IDX(prod);
4609 txbd = &bp->tx_desc_ring[ring_prod];
4610
4611 len = frag->size;
4612 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4613 len, PCI_DMA_TODEVICE);
4614 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4615 mapping, mapping);
4616
4617 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4618 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4619 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4620 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4621
4622 }
4623 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4624
4625 prod = NEXT_TX_BD(prod);
4626 bp->tx_prod_bseq += skb->len;
4627
Michael Chan234754d2006-11-19 14:11:41 -08004628 REG_WR16(bp, bp->tx_bidx_addr, prod);
4629 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004630
4631 mmiowb();
4632
4633 bp->tx_prod = prod;
4634 dev->trans_start = jiffies;
4635
Michael Chane89bbf12005-08-25 15:36:58 -07004636 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004637 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004638 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004639 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004640 }
4641
4642 return NETDEV_TX_OK;
4643}
4644
/* Called with rtnl_lock */
/* net_device stop hook: wait out any in-flight reset_task, quiesce
 * the interface, reset the chip with a reset code reflecting the
 * wake-on-LAN configuration, release the IRQ/MSI, free the rings,
 * and drop the device into D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down: link-down unload
	 * when WOL is unsupported, else suspend with/without WOL.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4680
/* Combine the _hi/_lo halves of a 64-bit hardware counter into an
 * unsigned long.  The whole expansion is parenthesized so the macro
 * is safe inside larger expressions (e.g. multiplied or compared);
 * the previous form ended in a bare `+ lo` term that could bind to
 * surrounding operators with higher precedence. */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit, only the low word fits in unsigned long. */
#define GET_NET_STATS32(ctr)	\
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4693
4694static struct net_device_stats *
4695bnx2_get_stats(struct net_device *dev)
4696{
Michael Chan972ec0d2006-01-23 16:12:43 -08004697 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004698 struct statistics_block *stats_blk = bp->stats_blk;
4699 struct net_device_stats *net_stats = &bp->net_stats;
4700
4701 if (bp->stats_blk == NULL) {
4702 return net_stats;
4703 }
4704 net_stats->rx_packets =
4705 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4706 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4707 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4708
4709 net_stats->tx_packets =
4710 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4711 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4712 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4713
4714 net_stats->rx_bytes =
4715 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4716
4717 net_stats->tx_bytes =
4718 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4719
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004720 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004721 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4722
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004723 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004724 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4725
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004726 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004727 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4728 stats_blk->stat_EtherStatsOverrsizePkts);
4729
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004730 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004731 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4732
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004733 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004734 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4735
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004736 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004737 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4738
4739 net_stats->rx_errors = net_stats->rx_length_errors +
4740 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4741 net_stats->rx_crc_errors;
4742
4743 net_stats->tx_aborted_errors =
4744 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4745 stats_blk->stat_Dot3StatsLateCollisions);
4746
Michael Chan5b0c76a2005-11-04 08:45:49 -08004747 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4748 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07004749 net_stats->tx_carrier_errors = 0;
4750 else {
4751 net_stats->tx_carrier_errors =
4752 (unsigned long)
4753 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4754 }
4755
4756 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004757 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07004758 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4759 +
4760 net_stats->tx_aborted_errors +
4761 net_stats->tx_carrier_errors;
4762
Michael Chancea94db2006-06-12 22:16:13 -07004763 net_stats->rx_missed_errors =
4764 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4765 stats_blk->stat_FwRxDrop);
4766
Michael Chanb6016b72005-05-26 13:03:09 -07004767 return net_stats;
4768}
4769
4770/* All ethtool functions called with rtnl_lock */
4771
4772static int
4773bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4774{
Michael Chan972ec0d2006-01-23 16:12:43 -08004775 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004776
4777 cmd->supported = SUPPORTED_Autoneg;
4778 if (bp->phy_flags & PHY_SERDES_FLAG) {
4779 cmd->supported |= SUPPORTED_1000baseT_Full |
4780 SUPPORTED_FIBRE;
4781
4782 cmd->port = PORT_FIBRE;
4783 }
4784 else {
4785 cmd->supported |= SUPPORTED_10baseT_Half |
4786 SUPPORTED_10baseT_Full |
4787 SUPPORTED_100baseT_Half |
4788 SUPPORTED_100baseT_Full |
4789 SUPPORTED_1000baseT_Full |
4790 SUPPORTED_TP;
4791
4792 cmd->port = PORT_TP;
4793 }
4794
4795 cmd->advertising = bp->advertising;
4796
4797 if (bp->autoneg & AUTONEG_SPEED) {
4798 cmd->autoneg = AUTONEG_ENABLE;
4799 }
4800 else {
4801 cmd->autoneg = AUTONEG_DISABLE;
4802 }
4803
4804 if (netif_carrier_ok(dev)) {
4805 cmd->speed = bp->line_speed;
4806 cmd->duplex = bp->duplex;
4807 }
4808 else {
4809 cmd->speed = -1;
4810 cmd->duplex = -1;
4811 }
4812
4813 cmd->transceiver = XCVR_INTERNAL;
4814 cmd->phy_address = bp->phy_addr;
4815
4816 return 0;
4817}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004818
/* ethtool set_settings: validate and apply autoneg/speed/duplex.
 * Called with rtnl_lock held. */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed to *bp until all
	 * validation has passed. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* NOTE(review): masks with the copper speed set even for
		 * SerDes PHYs; presumably harmless since the serdes-invalid
		 * speeds are rejected below — confirm. */
		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 are copper-only speeds. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half-duplex is never supported. */
			return -EINVAL;
		}
		else {
			/* Any other combination: advertise everything the
			 * media type supports. */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex requested. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes supports only 1G/2.5G full duplex, and
			 * 2.5G only on PHYs that advertise the capability. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1G is not allowed on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	/* phy_lock serializes against the link timer and interrupt paths. */
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4894
4895static void
4896bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4897{
Michael Chan972ec0d2006-01-23 16:12:43 -08004898 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004899
4900 strcpy(info->driver, DRV_MODULE_NAME);
4901 strcpy(info->version, DRV_MODULE_VERSION);
4902 strcpy(info->bus_info, pci_name(bp->pdev));
4903 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4904 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4905 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004906 info->fw_version[1] = info->fw_version[3] = '.';
4907 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004908}
4909
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* Fixed-size dump; unreadable ranges are zero-filled. */
	return BNX2_REGDUMP_LEN;
}
4917
4918static void
4919bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4920{
4921 u32 *p = _p, i, offset;
4922 u8 *orig_p = _p;
4923 struct bnx2 *bp = netdev_priv(dev);
4924 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4925 0x0800, 0x0880, 0x0c00, 0x0c10,
4926 0x0c30, 0x0d08, 0x1000, 0x101c,
4927 0x1040, 0x1048, 0x1080, 0x10a4,
4928 0x1400, 0x1490, 0x1498, 0x14f0,
4929 0x1500, 0x155c, 0x1580, 0x15dc,
4930 0x1600, 0x1658, 0x1680, 0x16d8,
4931 0x1800, 0x1820, 0x1840, 0x1854,
4932 0x1880, 0x1894, 0x1900, 0x1984,
4933 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4934 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4935 0x2000, 0x2030, 0x23c0, 0x2400,
4936 0x2800, 0x2820, 0x2830, 0x2850,
4937 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4938 0x3c00, 0x3c94, 0x4000, 0x4010,
4939 0x4080, 0x4090, 0x43c0, 0x4458,
4940 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4941 0x4fc0, 0x5010, 0x53c0, 0x5444,
4942 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4943 0x5fc0, 0x6000, 0x6400, 0x6428,
4944 0x6800, 0x6848, 0x684c, 0x6860,
4945 0x6888, 0x6910, 0x8000 };
4946
4947 regs->version = 0;
4948
4949 memset(p, 0, BNX2_REGDUMP_LEN);
4950
4951 if (!netif_running(bp->dev))
4952 return;
4953
4954 i = 0;
4955 offset = reg_boundaries[0];
4956 p += offset;
4957 while (offset < BNX2_REGDUMP_LEN) {
4958 *p++ = REG_RD(bp, offset);
4959 offset += 4;
4960 if (offset == reg_boundaries[i + 1]) {
4961 offset = reg_boundaries[i + 2];
4962 p = (u32 *) (orig_p + offset);
4963 i += 2;
4964 }
4965 }
4966}
4967
Michael Chanb6016b72005-05-26 13:03:09 -07004968static void
4969bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4970{
Michael Chan972ec0d2006-01-23 16:12:43 -08004971 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004972
4973 if (bp->flags & NO_WOL_FLAG) {
4974 wol->supported = 0;
4975 wol->wolopts = 0;
4976 }
4977 else {
4978 wol->supported = WAKE_MAGIC;
4979 if (bp->wol)
4980 wol->wolopts = WAKE_MAGIC;
4981 else
4982 wol->wolopts = 0;
4983 }
4984 memset(&wol->sopass, 0, sizeof(wol->sopass));
4985}
4986
4987static int
4988bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4989{
Michael Chan972ec0d2006-01-23 16:12:43 -08004990 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004991
4992 if (wol->wolopts & ~WAKE_MAGIC)
4993 return -EINVAL;
4994
4995 if (wol->wolopts & WAKE_MAGIC) {
4996 if (bp->flags & NO_WOL_FLAG)
4997 return -EINVAL;
4998
4999 bp->wol = 1;
5000 }
5001 else {
5002 bp->wol = 0;
5003 }
5004 return 0;
5005}
5006
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep cannot run under
		 * a BH-disabled spinlock. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the timer to supervise the SerDes autoneg. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5041
5042static int
5043bnx2_get_eeprom_len(struct net_device *dev)
5044{
Michael Chan972ec0d2006-01-23 16:12:43 -08005045 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005046
Michael Chan1122db72006-01-23 16:11:42 -08005047 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005048 return 0;
5049
Michael Chan1122db72006-01-23 16:11:42 -08005050 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005051}
5052
5053static int
5054bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5055 u8 *eebuf)
5056{
Michael Chan972ec0d2006-01-23 16:12:43 -08005057 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005058 int rc;
5059
John W. Linville1064e942005-11-10 12:58:24 -08005060 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005061
5062 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5063
5064 return rc;
5065}
5066
5067static int
5068bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5069 u8 *eebuf)
5070{
Michael Chan972ec0d2006-01-23 16:12:43 -08005071 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005072 int rc;
5073
John W. Linville1064e942005-11-10 12:58:24 -08005074 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005075
5076 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5077
5078 return rc;
5079}
5080
5081static int
5082bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5083{
Michael Chan972ec0d2006-01-23 16:12:43 -08005084 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005085
5086 memset(coal, 0, sizeof(struct ethtool_coalesce));
5087
5088 coal->rx_coalesce_usecs = bp->rx_ticks;
5089 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5090 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5091 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5092
5093 coal->tx_coalesce_usecs = bp->tx_ticks;
5094 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5095 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5096 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5097
5098 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5099
5100 return 0;
5101}
5102
5103static int
5104bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5105{
Michael Chan972ec0d2006-01-23 16:12:43 -08005106 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005107
5108 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5109 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5110
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005111 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005112 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5113
5114 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5115 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5116
5117 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5118 if (bp->rx_quick_cons_trip_int > 0xff)
5119 bp->rx_quick_cons_trip_int = 0xff;
5120
5121 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5122 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5123
5124 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5125 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5126
5127 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5128 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5129
5130 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5131 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5132 0xff;
5133
5134 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5135 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5136 bp->stats_ticks &= 0xffff00;
5137
5138 if (netif_running(bp->dev)) {
5139 bnx2_netif_stop(bp);
5140 bnx2_init_nic(bp);
5141 bnx2_netif_start(bp);
5142 }
5143
5144 return 0;
5145}
5146
5147static void
5148bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5149{
Michael Chan972ec0d2006-01-23 16:12:43 -08005150 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005151
Michael Chan13daffa2006-03-20 17:49:20 -08005152 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005153 ering->rx_mini_max_pending = 0;
5154 ering->rx_jumbo_max_pending = 0;
5155
5156 ering->rx_pending = bp->rx_ring_size;
5157 ering->rx_mini_pending = 0;
5158 ering->rx_jumbo_pending = 0;
5159
5160 ering->tx_max_pending = MAX_TX_DESC_CNT;
5161 ering->tx_pending = bp->tx_ring_size;
5162}
5163
/* ethtool set_ringparam: resize the rx/tx rings, tearing down and
 * re-creating the rings if the interface is up. */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* tx ring must hold more than MAX_SKB_FRAGS descriptors so a
	 * maximally fragmented skb always fits. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its rings freed while still marked running;
		 * confirm whether a dev_close() is needed here. */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5197
5198static void
5199bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5200{
Michael Chan972ec0d2006-01-23 16:12:43 -08005201 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005202
5203 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5204 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5205 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5206}
5207
5208static int
5209bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5210{
Michael Chan972ec0d2006-01-23 16:12:43 -08005211 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005212
5213 bp->req_flow_ctrl = 0;
5214 if (epause->rx_pause)
5215 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5216 if (epause->tx_pause)
5217 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5218
5219 if (epause->autoneg) {
5220 bp->autoneg |= AUTONEG_FLOW_CTRL;
5221 }
5222 else {
5223 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5224 }
5225
Michael Chanc770a652005-08-25 15:38:39 -07005226 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005227
5228 bnx2_setup_phy(bp);
5229
Michael Chanc770a652005-08-25 15:38:39 -07005230 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005231
5232 return 0;
5233}
5234
/* ethtool get_rx_csum: nonzero when RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5242
/* ethtool set_rx_csum: just record the setting; the RX path consults
 * bp->rx_csum when deciding whether to trust hardware checksums. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5251
Michael Chanb11d6212006-06-29 12:31:21 -07005252static int
5253bnx2_set_tso(struct net_device *dev, u32 data)
5254{
Michael Chan4666f872007-05-03 13:22:28 -07005255 struct bnx2 *bp = netdev_priv(dev);
5256
5257 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005258 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005259 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5260 dev->features |= NETIF_F_TSO6;
5261 } else
5262 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5263 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005264 return 0;
5265}
5266
/* Number of ethtool statistics exposed by this driver. */
#define BNX2_NUM_STATS 46

/* Stat names reported for ETH_SS_STATS; the order must match
 * bnx2_stats_offset_arr[] and the *_stats_len_arr[] tables below. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5319
/* Byte offset of a counter in struct statistics_block, expressed in
 * 32-bit words (the stats block is read as a u32 array). */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each stat, indexed the same as bnx2_stats_str_arr[];
 * 64-bit counters point at their _hi word (low word follows it). */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5370
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-stat counter width in bytes on 5706 (and 5708 A0); 8 = 64-bit,
 * 4 = 32-bit, 0 = counter unusable on this chip (reported as 0). */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5381
/* Per-stat counter width in bytes on 5708 (and later); no skipped
 * counters here — see the 5706 table above for the errata cases. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5389
/* Number of ethtool self-tests; must match bnx2_self_test()'s buf[]. */
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST, in the order bnx2_self_test()
 * fills its result buffer. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5402
/* ethtool self_test_count: number of u64 results bnx2_self_test()
 * writes. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5408
/* ethtool self_test: run the diagnostic suite.  buf[] entries are
 * indexed like bnx2_tests_str_arr[]; nonzero means that test failed. */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests require the datapath quiesced and the
		 * chip reset into diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is stored directly; nonzero = failure. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Bring the chip back: plain reset if the interface is
		 * down, full reinit + restart if it was up. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run regardless of the offline flag. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5464
5465static void
5466bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5467{
5468 switch (stringset) {
5469 case ETH_SS_STATS:
5470 memcpy(buf, bnx2_stats_str_arr,
5471 sizeof(bnx2_stats_str_arr));
5472 break;
5473 case ETH_SS_TEST:
5474 memcpy(buf, bnx2_tests_str_arr,
5475 sizeof(bnx2_tests_str_arr));
5476 break;
5477 }
5478}
5479
/* ethtool get_stats_count: number of u64 entries filled by
 * bnx2_get_ethtool_stats(). */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5485
5486static void
5487bnx2_get_ethtool_stats(struct net_device *dev,
5488 struct ethtool_stats *stats, u64 *buf)
5489{
Michael Chan972ec0d2006-01-23 16:12:43 -08005490 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005491 int i;
5492 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005493 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005494
5495 if (hw_stats == NULL) {
5496 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5497 return;
5498 }
5499
Michael Chan5b0c76a2005-11-04 08:45:49 -08005500 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5501 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5502 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5503 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005504 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005505 else
5506 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005507
5508 for (i = 0; i < BNX2_NUM_STATS; i++) {
5509 if (stats_len_arr[i] == 0) {
5510 /* skip this counter */
5511 buf[i] = 0;
5512 continue;
5513 }
5514 if (stats_len_arr[i] == 4) {
5515 /* 4-byte counter */
5516 buf[i] = (u64)
5517 *(hw_stats + bnx2_stats_offset_arr[i]);
5518 continue;
5519 }
5520 /* 8-byte counter */
5521 buf[i] = (((u64) *(hw_stats +
5522 bnx2_stats_offset_arr[i])) << 32) +
5523 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5524 }
5525}
5526
/* ethtool phys_id: blink the port LEDs to identify the NIC.
 * data is the duration in seconds; 0 selects the 2-second default. */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take software control of the LEDs, remembering the old mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle at 1 Hz: 500 ms all-off (override only), 500 ms all-on. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupted the ethtool command. */
		if (signal_pending(current))
			break;
	}
	/* Return LED control to the hardware. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5560
Michael Chan4666f872007-05-03 13:22:28 -07005561static int
5562bnx2_set_tx_csum(struct net_device *dev, u32 data)
5563{
5564 struct bnx2 *bp = netdev_priv(dev);
5565
5566 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5567 return (ethtool_op_set_tx_hw_csum(dev, data));
5568 else
5569 return (ethtool_op_set_tx_csum(dev, data));
5570}
5571
/* ethtool operations supported by this driver.  Entries with no
 * device-specific behavior are wired to the generic ethtool_op_*
 * helpers; everything else points at the bnx2_* handlers above.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	/* set_tx_csum is chip-dependent (5709 vs. older) — see above. */
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5607
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru: SIOCGMIIPHY also returns register 0 data */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY registers are only reachable while the chip is up. */
		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the link code. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers can alter link state; restrict
		 * to privileged callers.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5655
5656/* Called with rtnl_lock */
5657static int
5658bnx2_change_mac_addr(struct net_device *dev, void *p)
5659{
5660 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005661 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005662
Michael Chan73eef4c2005-08-25 15:39:15 -07005663 if (!is_valid_ether_addr(addr->sa_data))
5664 return -EINVAL;
5665
Michael Chanb6016b72005-05-26 13:03:09 -07005666 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5667 if (netif_running(dev))
5668 bnx2_set_mac_addr(bp);
5669
5670 return 0;
5671}
5672
5673/* Called with rtnl_lock */
5674static int
5675bnx2_change_mtu(struct net_device *dev, int new_mtu)
5676{
Michael Chan972ec0d2006-01-23 16:12:43 -08005677 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005678
5679 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5680 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5681 return -EINVAL;
5682
5683 dev->mtu = new_mtu;
5684 if (netif_running(dev)) {
5685 bnx2_netif_stop(bp);
5686
5687 bnx2_init_nic(bp);
5688
5689 bnx2_netif_start(bp);
5690 }
5691 return 0;
5692}
5693
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll/netconsole hook: service the device when normal interrupt
 * delivery cannot be relied upon.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Mask the IRQ line and invoke the interrupt handler by hand. */
	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5705
Michael Chan253c8b72007-01-08 19:56:01 -08005706static void __devinit
5707bnx2_get_5709_media(struct bnx2 *bp)
5708{
5709 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5710 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5711 u32 strap;
5712
5713 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5714 return;
5715 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5716 bp->phy_flags |= PHY_SERDES_FLAG;
5717 return;
5718 }
5719
5720 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5721 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5722 else
5723 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5724
5725 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5726 switch (strap) {
5727 case 0x4:
5728 case 0x5:
5729 case 0x6:
5730 bp->phy_flags |= PHY_SERDES_FLAG;
5731 return;
5732 }
5733 } else {
5734 switch (strap) {
5735 case 0x1:
5736 case 0x2:
5737 case 0x4:
5738 bp->phy_flags |= PHY_SERDES_FLAG;
5739 return;
5740 }
5741 }
5742}
5743
Michael Chanb6016b72005-05-26 13:03:09 -07005744static int __devinit
5745bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5746{
5747 struct bnx2 *bp;
5748 unsigned long mem_len;
5749 int rc;
5750 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07005751 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07005752
5753 SET_MODULE_OWNER(dev);
5754 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005755 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005756
5757 bp->flags = 0;
5758 bp->phy_flags = 0;
5759
5760 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5761 rc = pci_enable_device(pdev);
5762 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005763 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005764 goto err_out;
5765 }
5766
5767 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005768 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005769 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005770 rc = -ENODEV;
5771 goto err_out_disable;
5772 }
5773
5774 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5775 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005776 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005777 goto err_out_disable;
5778 }
5779
5780 pci_set_master(pdev);
5781
5782 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5783 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005784 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005785 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005786 rc = -EIO;
5787 goto err_out_release;
5788 }
5789
Michael Chanb6016b72005-05-26 13:03:09 -07005790 bp->dev = dev;
5791 bp->pdev = pdev;
5792
5793 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005794 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005795
5796 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005797 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005798 dev->mem_end = dev->mem_start + mem_len;
5799 dev->irq = pdev->irq;
5800
5801 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5802
5803 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005804 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005805 rc = -ENOMEM;
5806 goto err_out_release;
5807 }
5808
5809 /* Configure byte swap and enable write to the reg_window registers.
5810 * Rely on CPU to do target byte swapping on big endian systems
5811 * The chip's target access swapping will not swap all accesses
5812 */
5813 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5814 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5815 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5816
Pavel Machek829ca9a2005-09-03 15:56:56 -07005817 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005818
5819 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5820
Michael Chan59b47d82006-11-19 14:10:45 -08005821 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5822 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5823 if (bp->pcix_cap == 0) {
5824 dev_err(&pdev->dev,
5825 "Cannot find PCIX capability, aborting.\n");
5826 rc = -EIO;
5827 goto err_out_unmap;
5828 }
5829 }
5830
Michael Chan40453c82007-05-03 13:19:18 -07005831 /* 5708 cannot support DMA addresses > 40-bit. */
5832 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5833 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5834 else
5835 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5836
5837 /* Configure DMA attributes. */
5838 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5839 dev->features |= NETIF_F_HIGHDMA;
5840 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5841 if (rc) {
5842 dev_err(&pdev->dev,
5843 "pci_set_consistent_dma_mask failed, aborting.\n");
5844 goto err_out_unmap;
5845 }
5846 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5847 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5848 goto err_out_unmap;
5849 }
5850
Michael Chanb6016b72005-05-26 13:03:09 -07005851 /* Get bus information. */
5852 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5853 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5854 u32 clkreg;
5855
5856 bp->flags |= PCIX_FLAG;
5857
5858 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005859
Michael Chanb6016b72005-05-26 13:03:09 -07005860 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5861 switch (clkreg) {
5862 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5863 bp->bus_speed_mhz = 133;
5864 break;
5865
5866 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5867 bp->bus_speed_mhz = 100;
5868 break;
5869
5870 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5871 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5872 bp->bus_speed_mhz = 66;
5873 break;
5874
5875 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5876 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5877 bp->bus_speed_mhz = 50;
5878 break;
5879
5880 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5881 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5882 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5883 bp->bus_speed_mhz = 33;
5884 break;
5885 }
5886 }
5887 else {
5888 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5889 bp->bus_speed_mhz = 66;
5890 else
5891 bp->bus_speed_mhz = 33;
5892 }
5893
5894 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5895 bp->flags |= PCI_32BIT_FLAG;
5896
5897 /* 5706A0 may falsely detect SERR and PERR. */
5898 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5899 reg = REG_RD(bp, PCI_COMMAND);
5900 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5901 REG_WR(bp, PCI_COMMAND, reg);
5902 }
5903 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5904 !(bp->flags & PCIX_FLAG)) {
5905
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005906 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005907 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005908 goto err_out_unmap;
5909 }
5910
5911 bnx2_init_nvram(bp);
5912
Michael Chane3648b32005-11-04 08:51:21 -08005913 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5914
5915 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08005916 BNX2_SHM_HDR_SIGNATURE_SIG) {
5917 u32 off = PCI_FUNC(pdev->devfn) << 2;
5918
5919 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5920 } else
Michael Chane3648b32005-11-04 08:51:21 -08005921 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5922
Michael Chanb6016b72005-05-26 13:03:09 -07005923 /* Get the permanent MAC address. First we need to make sure the
5924 * firmware is actually running.
5925 */
Michael Chane3648b32005-11-04 08:51:21 -08005926 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005927
5928 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5929 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005930 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005931 rc = -ENODEV;
5932 goto err_out_unmap;
5933 }
5934
Michael Chane3648b32005-11-04 08:51:21 -08005935 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005936
Michael Chane3648b32005-11-04 08:51:21 -08005937 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005938 bp->mac_addr[0] = (u8) (reg >> 8);
5939 bp->mac_addr[1] = (u8) reg;
5940
Michael Chane3648b32005-11-04 08:51:21 -08005941 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005942 bp->mac_addr[2] = (u8) (reg >> 24);
5943 bp->mac_addr[3] = (u8) (reg >> 16);
5944 bp->mac_addr[4] = (u8) (reg >> 8);
5945 bp->mac_addr[5] = (u8) reg;
5946
5947 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005948 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005949
5950 bp->rx_csum = 1;
5951
5952 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5953
5954 bp->tx_quick_cons_trip_int = 20;
5955 bp->tx_quick_cons_trip = 20;
5956 bp->tx_ticks_int = 80;
5957 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005958
Michael Chanb6016b72005-05-26 13:03:09 -07005959 bp->rx_quick_cons_trip_int = 6;
5960 bp->rx_quick_cons_trip = 6;
5961 bp->rx_ticks_int = 18;
5962 bp->rx_ticks = 18;
5963
5964 bp->stats_ticks = 1000000 & 0xffff00;
5965
5966 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005967 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005968
Michael Chan5b0c76a2005-11-04 08:45:49 -08005969 bp->phy_addr = 1;
5970
Michael Chanb6016b72005-05-26 13:03:09 -07005971 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08005972 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5973 bnx2_get_5709_media(bp);
5974 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005975 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005976
5977 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005978 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005979 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005980 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005981 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005982 BNX2_SHARED_HW_CFG_CONFIG);
5983 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5984 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5985 }
Michael Chan261dd5c2007-01-08 19:55:46 -08005986 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5987 CHIP_NUM(bp) == CHIP_NUM_5708)
5988 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08005989 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5990 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005991
Michael Chan16088272006-06-12 22:16:43 -07005992 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5993 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5994 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005995 bp->flags |= NO_WOL_FLAG;
5996
Michael Chanb6016b72005-05-26 13:03:09 -07005997 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5998 bp->tx_quick_cons_trip_int =
5999 bp->tx_quick_cons_trip;
6000 bp->tx_ticks_int = bp->tx_ticks;
6001 bp->rx_quick_cons_trip_int =
6002 bp->rx_quick_cons_trip;
6003 bp->rx_ticks_int = bp->rx_ticks;
6004 bp->comp_prod_trip_int = bp->comp_prod_trip;
6005 bp->com_ticks_int = bp->com_ticks;
6006 bp->cmd_ticks_int = bp->cmd_ticks;
6007 }
6008
Michael Chanf9317a42006-09-29 17:06:23 -07006009 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6010 *
6011 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6012 * with byte enables disabled on the unused 32-bit word. This is legal
6013 * but causes problems on the AMD 8132 which will eventually stop
6014 * responding after a while.
6015 *
6016 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006017 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006018 */
6019 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6020 struct pci_dev *amd_8132 = NULL;
6021
6022 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6023 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6024 amd_8132))) {
6025 u8 rev;
6026
6027 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6028 if (rev >= 0x10 && rev <= 0x13) {
6029 disable_msi = 1;
6030 pci_dev_put(amd_8132);
6031 break;
6032 }
6033 }
6034 }
6035
Michael Chanb6016b72005-05-26 13:03:09 -07006036 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6037 bp->req_line_speed = 0;
6038 if (bp->phy_flags & PHY_SERDES_FLAG) {
6039 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006040
Michael Chane3648b32005-11-04 08:51:21 -08006041 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006042 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6043 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6044 bp->autoneg = 0;
6045 bp->req_line_speed = bp->line_speed = SPEED_1000;
6046 bp->req_duplex = DUPLEX_FULL;
6047 }
Michael Chanb6016b72005-05-26 13:03:09 -07006048 }
6049 else {
6050 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6051 }
6052
6053 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6054
Michael Chancd339a02005-08-25 15:35:24 -07006055 init_timer(&bp->timer);
6056 bp->timer.expires = RUN_AT(bp->timer_interval);
6057 bp->timer.data = (unsigned long) bp;
6058 bp->timer.function = bnx2_timer;
6059
Michael Chanb6016b72005-05-26 13:03:09 -07006060 return 0;
6061
6062err_out_unmap:
6063 if (bp->regview) {
6064 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006065 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006066 }
6067
6068err_out_release:
6069 pci_release_regions(pdev);
6070
6071err_out_disable:
6072 pci_disable_device(pdev);
6073 pci_set_drvdata(pdev, NULL);
6074
6075err_out:
6076 return rc;
6077}
6078
6079static int __devinit
6080bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6081{
6082 static int version_printed = 0;
6083 struct net_device *dev = NULL;
6084 struct bnx2 *bp;
6085 int rc, i;
6086
6087 if (version_printed++ == 0)
6088 printk(KERN_INFO "%s", version);
6089
6090 /* dev zeroed in init_etherdev */
6091 dev = alloc_etherdev(sizeof(*bp));
6092
6093 if (!dev)
6094 return -ENOMEM;
6095
6096 rc = bnx2_init_board(pdev, dev);
6097 if (rc < 0) {
6098 free_netdev(dev);
6099 return rc;
6100 }
6101
6102 dev->open = bnx2_open;
6103 dev->hard_start_xmit = bnx2_start_xmit;
6104 dev->stop = bnx2_close;
6105 dev->get_stats = bnx2_get_stats;
6106 dev->set_multicast_list = bnx2_set_rx_mode;
6107 dev->do_ioctl = bnx2_ioctl;
6108 dev->set_mac_address = bnx2_change_mac_addr;
6109 dev->change_mtu = bnx2_change_mtu;
6110 dev->tx_timeout = bnx2_tx_timeout;
6111 dev->watchdog_timeo = TX_TIMEOUT;
6112#ifdef BCM_VLAN
6113 dev->vlan_rx_register = bnx2_vlan_rx_register;
6114 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6115#endif
6116 dev->poll = bnx2_poll;
6117 dev->ethtool_ops = &bnx2_ethtool_ops;
6118 dev->weight = 64;
6119
Michael Chan972ec0d2006-01-23 16:12:43 -08006120 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006121
6122#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6123 dev->poll_controller = poll_bnx2;
6124#endif
6125
Michael Chan1b2f9222007-05-03 13:20:19 -07006126 pci_set_drvdata(pdev, dev);
6127
6128 memcpy(dev->dev_addr, bp->mac_addr, 6);
6129 memcpy(dev->perm_addr, bp->mac_addr, 6);
6130 bp->name = board_info[ent->driver_data].name;
6131
Michael Chan4666f872007-05-03 13:22:28 -07006132 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6133 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6134 else
6135 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan1b2f9222007-05-03 13:20:19 -07006136#ifdef BCM_VLAN
6137 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6138#endif
6139 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006140 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6141 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006142
Michael Chanb6016b72005-05-26 13:03:09 -07006143 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006144 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006145 if (bp->regview)
6146 iounmap(bp->regview);
6147 pci_release_regions(pdev);
6148 pci_disable_device(pdev);
6149 pci_set_drvdata(pdev, NULL);
6150 free_netdev(dev);
6151 return rc;
6152 }
6153
Michael Chanb6016b72005-05-26 13:03:09 -07006154 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6155 "IRQ %d, ",
6156 dev->name,
6157 bp->name,
6158 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6159 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6160 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6161 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6162 bp->bus_speed_mhz,
6163 dev->base_addr,
6164 bp->pdev->irq);
6165
6166 printk("node addr ");
6167 for (i = 0; i < 6; i++)
6168 printk("%2.2x", dev->dev_addr[i]);
6169 printk("\n");
6170
Michael Chanb6016b72005-05-26 13:03:09 -07006171 return 0;
6172}
6173
/* PCI .remove handler: tear down the interface in reverse order of
 * probe.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any queued reset_task before freeing its data. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6192
/* PCI suspend handler: quiesce a running interface, send the firmware
 * the reset code that reflects the WOL configuration, save PCI config
 * space, and enter the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let any pending reset_task finish before stopping the NIC. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware unload/suspend code based on whether WOL
	 * is supported and enabled.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6219
/* PCI resume handler: restore config space, return to full power, and
 * bring a previously-running interface back up.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* If it was down at suspend, leave it down. */
	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6236
/* PCI driver glue: binds the probe/remove/power-management entry
 * points above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6245
6246static int __init bnx2_init(void)
6247{
Jeff Garzik29917622006-08-19 17:48:59 -04006248 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006249}
6250
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6255
/* Hook the init/exit routines into module load and unload. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6258
6259
6260