blob: 5bacb7587df41b77cdc414b278e85750279f4477 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080056#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070057
/* Driver identification and module parameters. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.1"
#define DRV_MODULE_RELDATE	"November 15, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module option: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Supported board variants; used as the index into board_info[] and as
 * driver_data in bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;
91
/* Human-readable board names, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
105
/* PCI IDs handled by this driver.  HP OEM (subsystem-specific) entries
 * must precede the generic PCI_ANY_ID entries for the same device ID so
 * they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
125
/* Known NVRAM device descriptors.  The first field encodes the strap
 * value used to identify the part at probe time; the remaining numeric
 * fields are controller configuration words for that device type.
 * "Expansion" entries are placeholders for straps with no known part.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
212
213MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
Michael Chane89bbf12005-08-25 15:36:58 -0700215static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216{
Michael Chan2f8af122006-08-15 01:39:10 -0700217 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700218
Michael Chan2f8af122006-08-15 01:39:10 -0700219 smp_mb();
220 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
Michael Chane89bbf12005-08-25 15:36:58 -0700221 if (diff > MAX_TX_DESC_CNT)
222 diff = (diff & MAX_TX_DESC_CNT) - 1;
223 return (bp->tx_ring_size - diff);
224}
225
/* Indirect register read: latch the target offset into the PCICFG
 * window-address register, then read the value back through the window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
232
/* Indirect register write: latch the target offset into the PCICFG
 * window-address register, then write the value through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
239
240static void
241bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
242{
243 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800244 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
245 int i;
246
247 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
248 REG_WR(bp, BNX2_CTX_CTX_CTRL,
249 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
250 for (i = 0; i < 5; i++) {
251 u32 val;
252 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
253 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
254 break;
255 udelay(5);
256 }
257 } else {
258 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
259 REG_WR(bp, BNX2_CTX_DATA, val);
260 }
Michael Chanb6016b72005-05-26 13:03:09 -0700261}
262
/* Read a PHY register over the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is active it is temporarily
 * disabled (and re-enabled on exit) so the manual MDIO transaction does
 * not collide with it.  The read command is issued and polled up to
 * 50 x 10us for START_BUSY to clear.
 *
 * Returns 0 with *val set on success, -EBUSY (and *val = 0) on timeout.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back to flush the posted write before the delay */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and issue the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* re-read to pick up the returned data bits */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* restore hardware auto-polling */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
319
/* Write a PHY register over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is paused around the manual
 * transaction, the write command is issued, and START_BUSY is polled
 * up to 50 x 10us.
 *
 * Returns 0 on success, -EBUSY if the transaction did not complete.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back to flush the posted write before the delay */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and issue the MDIO write command (data in low bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* restore hardware auto-polling */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
368
/* Mask the device interrupt.  The read-back flushes the posted write so
 * the mask takes effect before the caller proceeds.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
376
/* Unmask the device interrupt and ack up to last_status_idx.
 *
 * Two INT_ACK_CMD writes are issued: first with MASK_INT still set,
 * then without it to actually unmask.  NOTE(review): presumably the
 * two-step sequence avoids a spurious interrupt window while the index
 * is being updated -- confirm against the chip manual.  The final
 * COAL_NOW write forces the host coalescing block to generate an
 * interrupt immediately if events are pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
389
/* Mask the interrupt and wait for any in-flight handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() will not re-enable
 * until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
397
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
408
/* Undo one bnx2_netif_stop().  Only the call that drops intr_sem to
 * zero re-enables the queue, polling and interrupts, so nested
 * stop/start pairs balance correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
420
/* Release all DMA and host memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially allocated state (each pointer is checked
 * and NULLed), which is how the alloc_mem_err path uses it.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context pages */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; freeing the
	 * status block frees both, so only the pointers are cleared.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
459
/* Allocate all rings, the combined status/statistics block, and (on
 * 5709) the context pages.  On any failure everything already obtained
 * is released via bnx2_free_mem().
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx_buf_ring covers all rx rings; vmalloc because it can be
	 * large (RX_DESC_CNT * rx_max_ring entries).
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives right after the cache-aligned status block */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context, split into BCM_PAGE_SIZE pages */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
530
531static void
Michael Chane3648b32005-11-04 08:51:21 -0800532bnx2_report_fw_link(struct bnx2 *bp)
533{
534 u32 fw_link_status = 0;
535
536 if (bp->link_up) {
537 u32 bmsr;
538
539 switch (bp->line_speed) {
540 case SPEED_10:
541 if (bp->duplex == DUPLEX_HALF)
542 fw_link_status = BNX2_LINK_STATUS_10HALF;
543 else
544 fw_link_status = BNX2_LINK_STATUS_10FULL;
545 break;
546 case SPEED_100:
547 if (bp->duplex == DUPLEX_HALF)
548 fw_link_status = BNX2_LINK_STATUS_100HALF;
549 else
550 fw_link_status = BNX2_LINK_STATUS_100FULL;
551 break;
552 case SPEED_1000:
553 if (bp->duplex == DUPLEX_HALF)
554 fw_link_status = BNX2_LINK_STATUS_1000HALF;
555 else
556 fw_link_status = BNX2_LINK_STATUS_1000FULL;
557 break;
558 case SPEED_2500:
559 if (bp->duplex == DUPLEX_HALF)
560 fw_link_status = BNX2_LINK_STATUS_2500HALF;
561 else
562 fw_link_status = BNX2_LINK_STATUS_2500FULL;
563 break;
564 }
565
566 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
567
568 if (bp->autoneg) {
569 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
570
571 bnx2_read_phy(bp, MII_BMSR, &bmsr);
572 bnx2_read_phy(bp, MII_BMSR, &bmsr);
573
574 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
575 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
576 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
577 else
578 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
579 }
580 }
581 else
582 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
583
584 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
585}
586
587static void
Michael Chanb6016b72005-05-26 13:03:09 -0700588bnx2_report_link(struct bnx2 *bp)
589{
590 if (bp->link_up) {
591 netif_carrier_on(bp->dev);
592 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
593
594 printk("%d Mbps ", bp->line_speed);
595
596 if (bp->duplex == DUPLEX_FULL)
597 printk("full duplex");
598 else
599 printk("half duplex");
600
601 if (bp->flow_ctrl) {
602 if (bp->flow_ctrl & FLOW_CTRL_RX) {
603 printk(", receive ");
604 if (bp->flow_ctrl & FLOW_CTRL_TX)
605 printk("& transmit ");
606 }
607 else {
608 printk(", transmit ");
609 }
610 printk("flow control ON");
611 }
612 printk("\n");
613 }
614 else {
615 netif_carrier_off(bp->dev);
616 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
617 }
Michael Chane3648b32005-11-04 08:51:21 -0800618
619 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700620}
621
622static void
623bnx2_resolve_flow_ctrl(struct bnx2 *bp)
624{
625 u32 local_adv, remote_adv;
626
627 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400628 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700629 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
630
631 if (bp->duplex == DUPLEX_FULL) {
632 bp->flow_ctrl = bp->req_flow_ctrl;
633 }
634 return;
635 }
636
637 if (bp->duplex != DUPLEX_FULL) {
638 return;
639 }
640
Michael Chan5b0c76a2005-11-04 08:45:49 -0800641 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
642 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
643 u32 val;
644
645 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
646 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
647 bp->flow_ctrl |= FLOW_CTRL_TX;
648 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
649 bp->flow_ctrl |= FLOW_CTRL_RX;
650 return;
651 }
652
Michael Chanb6016b72005-05-26 13:03:09 -0700653 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
654 bnx2_read_phy(bp, MII_LPA, &remote_adv);
655
656 if (bp->phy_flags & PHY_SERDES_FLAG) {
657 u32 new_local_adv = 0;
658 u32 new_remote_adv = 0;
659
660 if (local_adv & ADVERTISE_1000XPAUSE)
661 new_local_adv |= ADVERTISE_PAUSE_CAP;
662 if (local_adv & ADVERTISE_1000XPSE_ASYM)
663 new_local_adv |= ADVERTISE_PAUSE_ASYM;
664 if (remote_adv & ADVERTISE_1000XPAUSE)
665 new_remote_adv |= ADVERTISE_PAUSE_CAP;
666 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
667 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
668
669 local_adv = new_local_adv;
670 remote_adv = new_remote_adv;
671 }
672
673 /* See Table 28B-3 of 802.3ab-1999 spec. */
674 if (local_adv & ADVERTISE_PAUSE_CAP) {
675 if(local_adv & ADVERTISE_PAUSE_ASYM) {
676 if (remote_adv & ADVERTISE_PAUSE_CAP) {
677 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
678 }
679 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
680 bp->flow_ctrl = FLOW_CTRL_RX;
681 }
682 }
683 else {
684 if (remote_adv & ADVERTISE_PAUSE_CAP) {
685 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
686 }
687 }
688 }
689 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
690 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
691 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
692
693 bp->flow_ctrl = FLOW_CTRL_TX;
694 }
695 }
696}
697
698static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800699bnx2_5708s_linkup(struct bnx2 *bp)
700{
701 u32 val;
702
703 bp->link_up = 1;
704 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
705 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
706 case BCM5708S_1000X_STAT1_SPEED_10:
707 bp->line_speed = SPEED_10;
708 break;
709 case BCM5708S_1000X_STAT1_SPEED_100:
710 bp->line_speed = SPEED_100;
711 break;
712 case BCM5708S_1000X_STAT1_SPEED_1G:
713 bp->line_speed = SPEED_1000;
714 break;
715 case BCM5708S_1000X_STAT1_SPEED_2G5:
716 bp->line_speed = SPEED_2500;
717 break;
718 }
719 if (val & BCM5708S_1000X_STAT1_FD)
720 bp->duplex = DUPLEX_FULL;
721 else
722 bp->duplex = DUPLEX_HALF;
723
724 return 0;
725}
726
727static int
728bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700729{
730 u32 bmcr, local_adv, remote_adv, common;
731
732 bp->link_up = 1;
733 bp->line_speed = SPEED_1000;
734
735 bnx2_read_phy(bp, MII_BMCR, &bmcr);
736 if (bmcr & BMCR_FULLDPLX) {
737 bp->duplex = DUPLEX_FULL;
738 }
739 else {
740 bp->duplex = DUPLEX_HALF;
741 }
742
743 if (!(bmcr & BMCR_ANENABLE)) {
744 return 0;
745 }
746
747 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
748 bnx2_read_phy(bp, MII_LPA, &remote_adv);
749
750 common = local_adv & remote_adv;
751 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
752
753 if (common & ADVERTISE_1000XFULL) {
754 bp->duplex = DUPLEX_FULL;
755 }
756 else {
757 bp->duplex = DUPLEX_HALF;
758 }
759 }
760
761 return 0;
762}
763
764static int
765bnx2_copper_linkup(struct bnx2 *bp)
766{
767 u32 bmcr;
768
769 bnx2_read_phy(bp, MII_BMCR, &bmcr);
770 if (bmcr & BMCR_ANENABLE) {
771 u32 local_adv, remote_adv, common;
772
773 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
774 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
775
776 common = local_adv & (remote_adv >> 2);
777 if (common & ADVERTISE_1000FULL) {
778 bp->line_speed = SPEED_1000;
779 bp->duplex = DUPLEX_FULL;
780 }
781 else if (common & ADVERTISE_1000HALF) {
782 bp->line_speed = SPEED_1000;
783 bp->duplex = DUPLEX_HALF;
784 }
785 else {
786 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
787 bnx2_read_phy(bp, MII_LPA, &remote_adv);
788
789 common = local_adv & remote_adv;
790 if (common & ADVERTISE_100FULL) {
791 bp->line_speed = SPEED_100;
792 bp->duplex = DUPLEX_FULL;
793 }
794 else if (common & ADVERTISE_100HALF) {
795 bp->line_speed = SPEED_100;
796 bp->duplex = DUPLEX_HALF;
797 }
798 else if (common & ADVERTISE_10FULL) {
799 bp->line_speed = SPEED_10;
800 bp->duplex = DUPLEX_FULL;
801 }
802 else if (common & ADVERTISE_10HALF) {
803 bp->line_speed = SPEED_10;
804 bp->duplex = DUPLEX_HALF;
805 }
806 else {
807 bp->line_speed = 0;
808 bp->link_up = 0;
809 }
810 }
811 }
812 else {
813 if (bmcr & BMCR_SPEED100) {
814 bp->line_speed = SPEED_100;
815 }
816 else {
817 bp->line_speed = SPEED_10;
818 }
819 if (bmcr & BMCR_FULLDPLX) {
820 bp->duplex = DUPLEX_FULL;
821 }
822 else {
823 bp->duplex = DUPLEX_HALF;
824 }
825 }
826
827 return 0;
828}
829
/* Program the EMAC for the resolved link parameters: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Always
 * returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; use a larger IPG for 1000HD. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			/* 5706 has no dedicated 10M mode; use MII. */
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through: 2.5G also runs the GMII port */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
896
/* Re-evaluate link state after a link-change event, dispatch to the
 * chip-specific link-up handler, report changes, and reprogram the
 * MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is forced up and nothing else matters. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR is latched; read twice to get the current link bit. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: trust the EMAC link status over BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: on autoneg SerDes, drop any forced 2.5G
		 * setting and make sure autoneg is re-enabled.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
963
964static int
965bnx2_reset_phy(struct bnx2 *bp)
966{
967 int i;
968 u32 reg;
969
970 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
971
972#define PHY_RESET_MAX_WAIT 100
973 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
974 udelay(10);
975
976 bnx2_read_phy(bp, MII_BMCR, &reg);
977 if (!(reg & BMCR_RESET)) {
978 udelay(20);
979 break;
980 }
981 }
982 if (i == PHY_RESET_MAX_WAIT) {
983 return -EBUSY;
984 }
985 return 0;
986}
987
988static u32
989bnx2_phy_get_pause_adv(struct bnx2 *bp)
990{
991 u32 adv = 0;
992
993 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
994 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
995
996 if (bp->phy_flags & PHY_SERDES_FLAG) {
997 adv = ADVERTISE_1000XPAUSE;
998 }
999 else {
1000 adv = ADVERTISE_PAUSE_CAP;
1001 }
1002 }
1003 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1004 if (bp->phy_flags & PHY_SERDES_FLAG) {
1005 adv = ADVERTISE_1000XPSE_ASYM;
1006 }
1007 else {
1008 adv = ADVERTISE_PAUSE_ASYM;
1009 }
1010 }
1011 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1012 if (bp->phy_flags & PHY_SERDES_FLAG) {
1013 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1014 }
1015 else {
1016 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1017 }
1018 }
1019 return adv;
1020}
1021
/* Program the serdes PHY according to the requested autoneg/speed/duplex
 * settings in *bp.  Called with bp->phy_lock held (the lock is dropped
 * around the msleep() below).  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly instead of
		 * advertising.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* 2.5G requires both the BMCR force bit and the
			 * UP1 2.5G enable; toggling UP1 forces the link
			 * down so the partner renegotiates.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Dropping back from 2.5G: clear the UP1 enable. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Momentarily advertise no speeds and
				 * restart autoneg so the partner sees the
				 * link drop before the forced mode is set.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Enable 2.5G negotiation on capable PHYs. */
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			/* Loopback drops the link; release phy_lock while
			 * sleeping since msleep() may block.
			 */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1125
1126#define ETHTOOL_ALL_FIBRE_SPEED \
1127 (ADVERTISED_1000baseT_Full)
1128
1129#define ETHTOOL_ALL_COPPER_SPEED \
1130 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1131 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1132 ADVERTISED_1000baseT_Full)
1133
1134#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1135 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001136
Michael Chanb6016b72005-05-26 13:03:09 -07001137#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1138
/* Program the copper PHY according to the requested autoneg/speed/duplex
 * settings in *bp.  Called with bp->phy_lock held (dropped around the
 * msleep() below).  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we manage when comparing
		 * current vs. desired advertisement.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired 10/100 and 1000 advertisement words
		 * from the ethtool-style advertising mask.
		 */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if something actually changed (or
		 * autoneg was disabled); otherwise just re-resolve flow
		 * control on an up link.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Release phy_lock while sleeping. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1232
1233static int
1234bnx2_setup_phy(struct bnx2 *bp)
1235{
1236 if (bp->loopback == MAC_LOOPBACK)
1237 return 0;
1238
1239 if (bp->phy_flags & PHY_SERDES_FLAG) {
1240 return (bnx2_setup_serdes_phy(bp));
1241 }
1242 else {
1243 return (bnx2_setup_copper_phy(bp));
1244 }
1245}
1246
/* One-time initialization of the 5708 serdes PHY.  The PHY registers are
 * banked: BCM5708S_BLK_ADDR selects which block subsequent accesses hit,
 * so the write ordering here is significant.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the DIG3 block, enable IEEE register mapping, then
	 * return to the default DIG block.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with speed auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Allow 2.5G negotiation when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory on
	 * backplane designs, if the NVRAM config provides one.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1300
/* One-time initialization of the 5706 serdes PHY.  The 0x18/0x1c writes
 * are vendor shadow-register accesses (select value written first, then
 * read-modify-write) -- the exact constants come from Broadcom; do not
 * change them without hardware documentation.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1335
1336static int
1337bnx2_init_copper_phy(struct bnx2 *bp)
1338{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001339 u32 val;
1340
Michael Chanb6016b72005-05-26 13:03:09 -07001341 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1342
1343 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1344 bnx2_write_phy(bp, 0x18, 0x0c00);
1345 bnx2_write_phy(bp, 0x17, 0x000a);
1346 bnx2_write_phy(bp, 0x15, 0x310b);
1347 bnx2_write_phy(bp, 0x17, 0x201f);
1348 bnx2_write_phy(bp, 0x15, 0x9506);
1349 bnx2_write_phy(bp, 0x17, 0x401f);
1350 bnx2_write_phy(bp, 0x15, 0x14e2);
1351 bnx2_write_phy(bp, 0x18, 0x0400);
1352 }
1353
1354 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001355 /* Set extended packet length bit */
1356 bnx2_write_phy(bp, 0x18, 0x7);
1357 bnx2_read_phy(bp, 0x18, &val);
1358 bnx2_write_phy(bp, 0x18, val | 0x4000);
1359
1360 bnx2_read_phy(bp, 0x10, &val);
1361 bnx2_write_phy(bp, 0x10, val | 0x1);
1362 }
1363 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001364 bnx2_write_phy(bp, 0x18, 0x7);
1365 bnx2_read_phy(bp, 0x18, &val);
1366 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1367
1368 bnx2_read_phy(bp, 0x10, &val);
1369 bnx2_write_phy(bp, 0x10, val & ~0x1);
1370 }
1371
Michael Chan5b0c76a2005-11-04 08:45:49 -08001372 /* ethernet@wirespeed */
1373 bnx2_write_phy(bp, 0x18, 0x7007);
1374 bnx2_read_phy(bp, 0x18, &val);
1375 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001376 return 0;
1377}
1378
1379
/* Reset and initialize the PHY: set link-ready interrupt mode, read the
 * PHY ID, then run the chip-specific init and apply the current speed
 * settings.  Returns the chip-specific init's status (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Enable link attention from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1412
1413static int
1414bnx2_set_mac_loopback(struct bnx2 *bp)
1415{
1416 u32 mac_mode;
1417
1418 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1419 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1420 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1421 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1422 bp->link_up = 1;
1423 return 0;
1424}
1425
Michael Chanbc5a0692006-01-23 16:13:22 -08001426static int bnx2_test_link(struct bnx2 *);
1427
/* Put the PHY into loopback at 1000/full, wait (up to ~1 s) for the link
 * to come up, then set the EMAC to GMII port mode with loopback/force
 * bits cleared.  Returns 0 on success or the PHY write's error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/force/duplex/25G mode bits and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1457
Michael Chanb6016b72005-05-26 13:03:09 -07001458static int
Michael Chanb090ae22006-01-23 16:07:10 -08001459bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001460{
1461 int i;
1462 u32 val;
1463
Michael Chanb6016b72005-05-26 13:03:09 -07001464 bp->fw_wr_seq++;
1465 msg_data |= bp->fw_wr_seq;
1466
Michael Chane3648b32005-11-04 08:51:21 -08001467 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001468
1469 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001470 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1471 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001472
Michael Chane3648b32005-11-04 08:51:21 -08001473 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001474
1475 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1476 break;
1477 }
Michael Chanb090ae22006-01-23 16:07:10 -08001478 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1479 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001480
1481 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001482 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1483 if (!silent)
1484 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1485 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001486
1487 msg_data &= ~BNX2_DRV_MSG_CODE;
1488 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1489
Michael Chane3648b32005-11-04 08:51:21 -08001490 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001491
Michael Chanb6016b72005-05-26 13:03:09 -07001492 return -EBUSY;
1493 }
1494
Michael Chanb090ae22006-01-23 16:07:10 -08001495 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1496 return -EIO;
1497
Michael Chanb6016b72005-05-26 13:03:09 -07001498 return 0;
1499}
1500
/* Load the host context page table into the 5709's context engine, one
 * page at a time, polling for each write request to complete.
 * Returns 0 on success, -EBUSY if the hardware fails to consume a
 * page-table write within ~50 us.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and tell it the host page size. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Program the 64-bit DMA address of context page i, then
		 * issue the page-table write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the hardware to clear the write-request bit. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1534
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  On 5706 A0 some physical CIDs are remapped to work
 * around a silicon erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 erratum: CIDs with bit 3 set map to a
			 * different physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the physical context page at virtual address 0... */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* ...then restore the regular virtual->physical mapping. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1575
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the hardware pool, keep the good ones, and free only those back,
 * permanently retiring the blocks the hardware flags as bad.
 * Returns 0 on success, -ENOMEM if the temporary array can't be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch array for the good mbuf handles. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the handle in the format the free register
		 * expects.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1626
1627static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001628bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001629{
1630 u32 val;
1631 u8 *mac_addr = bp->dev->dev_addr;
1632
1633 val = (mac_addr[0] << 8) | mac_addr[1];
1634
1635 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1636
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001637 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001638 (mac_addr[4] << 8) | mac_addr[5];
1639
1640 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1641}
1642
/* Allocate and DMA-map a fresh receive skb for ring slot @index, and
 * publish its bus address in the corresponding rx_bd.  Advances
 * rx_prod_bseq by the buffer size.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN for DMA. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Record the skb and its mapping so the completion path can
	 * unmap and hand the buffer up.
	 */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1673
1674static void
1675bnx2_phy_int(struct bnx2 *bp)
1676{
1677 u32 new_link_state, old_link_state;
1678
1679 new_link_state = bp->status_blk->status_attn_bits &
1680 STATUS_ATTN_BITS_LINK_STATE;
1681 old_link_state = bp->status_blk->status_attn_bits_ack &
1682 STATUS_ATTN_BITS_LINK_STATE;
1683 if (new_link_state != old_link_state) {
1684 if (new_link_state) {
1685 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1686 STATUS_ATTN_BITS_LINK_STATE);
1687 }
1688 else {
1689 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1690 STATUS_ATTN_BITS_LINK_STATE);
1691 }
1692 bnx2_set_link(bp);
1693 }
1694}
1695
/* Reclaim completed TX descriptors up to the hardware consumer index in
 * the status block: unmap each packet's DMA buffers, free the skbs, and
 * wake the TX queue if enough ring space has been recovered.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The index skips the last descriptor slot (used as a chain
	 * pointer); step over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Don't free the skb until all of its descriptors
			 * (head + fragments) have completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment's descriptor. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue under the TX lock, re-checking the conditions
	 * to close the race with bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1783
/* Recycle an rx skb from consumer slot @cons back into producer slot
 * @prod without unmapping it: move the skb, its DMA mapping, and the
 * descriptor address words over, and advance rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the (partially CPU-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the 64-bit buffer address into the producer descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1813
/* NAPI receive processing: consume up to @budget completed rx
 * descriptors, validate each frame via its l2_fhdr, deliver good
 * packets up the stack, recycle buffers on error or allocation
 * failure, and finally publish the new producer index/sequence to the
 * hardware.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the chain-pointer slot at the end of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; enough to read
		 * the l2_fhdr and small packets.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr status header to the
		 * frame; packet length includes the 4-byte CRC.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Recycle the original big buffer back into the
			 * ring.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was posted; pass the full
			 * buffer up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag
		 * (ethertype 0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when offload is on and
		 * no checksum errors are flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return rx_pkt;

}
1963
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache with the status block before NAPI reads it. */
	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further chip interrupts; the NAPI
	 * poll routine re-enables them when it completes.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer all RX/TX/link work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1986
/* INTx (possibly shared) interrupt handler.  Unlike the MSI handler,
 * this must first determine whether the interrupt actually belongs to
 * this device before acking it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	/* Not our interrupt: the status index has not advanced and the
	 * INTA state read from MISC_STATUS confirms no new event.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask chip interrupts; NAPI poll unmasks when done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2016
Michael Chanf4e418f2005-11-04 08:53:48 -08002017static inline int
2018bnx2_has_work(struct bnx2 *bp)
2019{
2020 struct status_block *sblk = bp->status_blk;
2021
2022 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2023 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2024 return 1;
2025
2026 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2027 bp->link_up)
2028 return 1;
2029
2030 return 0;
2031}
2032
/* NAPI poll routine.  Services link attentions, TX completions and up
 * to *budget RX packets.  Returns 0 (and re-enables interrupts) when
 * all work is done, 1 when the kernel should poll again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A link attention is pending when the attn bit differs from
	 * its ack bit in the status block.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	/* Reap TX completions if the hardware consumer index moved. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	/* Process received packets, bounded by both *budget and quota. */
	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index we are acking; the rmb() orders this
	 * read before the bnx2_has_work() checks below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack with the index re-enables interrupts. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with MASK_INT, then a second write to
		 * unmask at the acked index.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* More work remains; stay on the poll list. */
	return 1;
}
2094
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the EMAC RX mode and RPM sort registers according to the
 * netdevice flags: promiscuous, all-multicast, or a hashed multicast
 * filter built from dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promisc/VLAN-keep cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and the
	 * management firmware (ASF) does not need them stripped.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast: fill all hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low CRC byte selects one of 256 bits
		 * spread across NUM_MC_HASH_REGISTERS 32-bit registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort rules: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2169
Michael Chanfba9fe92006-06-12 22:21:25 -07002170#define FW_BUF_SIZE 0x8000
2171
2172static int
2173bnx2_gunzip_init(struct bnx2 *bp)
2174{
2175 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2176 goto gunzip_nomem1;
2177
2178 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2179 goto gunzip_nomem2;
2180
2181 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2182 if (bp->strm->workspace == NULL)
2183 goto gunzip_nomem3;
2184
2185 return 0;
2186
2187gunzip_nomem3:
2188 kfree(bp->strm);
2189 bp->strm = NULL;
2190
2191gunzip_nomem2:
2192 vfree(bp->gunzip_buf);
2193 bp->gunzip_buf = NULL;
2194
2195gunzip_nomem1:
2196 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2197 "uncompression.\n", bp->dev->name);
2198 return -ENOMEM;
2199}
2200
/* Free the zlib stream and scratch buffer set up by bnx2_gunzip_init().
 * NOTE(review): bp->strm is dereferenced unconditionally, so this must
 * only be called after a successful bnx2_gunzip_init() — confirm all
 * call sites honor that.
 */
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
2214
/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf.
 * On success returns 0 and sets *outbuf/*outlen to the inflated data
 * (which lives in the shared gunzip buffer and is overwritten by the
 * next call).  Returns -EINVAL for a bad gzip header or a zlib error
 * code on decompression failure.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* If the FNAME flag is set, skip the NUL-terminated original
	 * file name that follows the fixed header.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative windowBits: raw deflate stream, no zlib wrapper. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	/* Z_STREAM_END means the whole stream inflated successfully. */
	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2255
/* Load microcode into one of the two RV2P processors.  The code is
 * written one 64-bit instruction at a time (high word, low word, then
 * the address/command register), after which the processor is held in
 * reset; it is un-stalled later during chip init.
 * NOTE(review): instruction words pass through cpu_to_le32() before
 * REG_WR — confirm this matches the byte order of the firmware arrays.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each iteration writes one 8-byte instruction. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at index i/8 into the selected
		 * processor's instruction memory.
		 */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2288
/* Halt one of the chip's internal MIPS CPUs, copy every firmware
 * section (text, data, sbss, bss, rodata) into its scratchpad through
 * indirect register writes, then restart it at the firmware entry
 * point.  Returns 0 on success or the error from decompressing the
 * text section.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		/* Text ships gzip-compressed; inflate it into the shared
		 * gunzip buffer and cache the pointer in fw->text.
		 */
		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
	}
	/* NOTE(review): this second gz_text check could be merged with
	 * the one above; text words are byte-swapped with cpu_to_le32()
	 * unlike the other sections — confirm against the firmware
	 * image layout.
	 */
	if (fw->gz_text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
2376
Michael Chanfba9fe92006-06-12 22:21:25 -07002377static int
Michael Chanb6016b72005-05-26 13:03:09 -07002378bnx2_init_cpus(struct bnx2 *bp)
2379{
2380 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002381 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002382 int rc = 0;
2383 void *text;
2384 u32 text_len;
2385
2386 if ((rc = bnx2_gunzip_init(bp)) != 0)
2387 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002388
2389 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002390 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2391 &text_len);
2392 if (rc)
2393 goto init_cpu_err;
2394
2395 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2396
2397 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2398 &text_len);
2399 if (rc)
2400 goto init_cpu_err;
2401
2402 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002403
2404 /* Initialize the RX Processor. */
2405 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2406 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2407 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2408 cpu_reg.state = BNX2_RXP_CPU_STATE;
2409 cpu_reg.state_value_clear = 0xffffff;
2410 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2411 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2412 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2413 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2414 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2415 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2416 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002417
Michael Chand43584c2006-11-19 14:14:35 -08002418 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2419 fw = &bnx2_rxp_fw_09;
2420 else
2421 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002422
Michael Chanaf3ee512006-11-19 14:09:25 -08002423 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002424 if (rc)
2425 goto init_cpu_err;
2426
Michael Chanb6016b72005-05-26 13:03:09 -07002427 /* Initialize the TX Processor. */
2428 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2429 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2430 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2431 cpu_reg.state = BNX2_TXP_CPU_STATE;
2432 cpu_reg.state_value_clear = 0xffffff;
2433 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2434 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2435 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2436 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2437 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2438 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2439 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002440
Michael Chand43584c2006-11-19 14:14:35 -08002441 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2442 fw = &bnx2_txp_fw_09;
2443 else
2444 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002445
Michael Chanaf3ee512006-11-19 14:09:25 -08002446 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002447 if (rc)
2448 goto init_cpu_err;
2449
Michael Chanb6016b72005-05-26 13:03:09 -07002450 /* Initialize the TX Patch-up Processor. */
2451 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2452 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2453 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2454 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2455 cpu_reg.state_value_clear = 0xffffff;
2456 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2457 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2458 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2459 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2460 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2461 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2462 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002463
Michael Chand43584c2006-11-19 14:14:35 -08002464 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2465 fw = &bnx2_tpat_fw_09;
2466 else
2467 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002468
Michael Chanaf3ee512006-11-19 14:09:25 -08002469 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002470 if (rc)
2471 goto init_cpu_err;
2472
Michael Chanb6016b72005-05-26 13:03:09 -07002473 /* Initialize the Completion Processor. */
2474 cpu_reg.mode = BNX2_COM_CPU_MODE;
2475 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2476 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2477 cpu_reg.state = BNX2_COM_CPU_STATE;
2478 cpu_reg.state_value_clear = 0xffffff;
2479 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2480 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2481 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2482 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2483 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2484 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2485 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002486
Michael Chand43584c2006-11-19 14:14:35 -08002487 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2488 fw = &bnx2_com_fw_09;
2489 else
2490 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002491
Michael Chanaf3ee512006-11-19 14:09:25 -08002492 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002493 if (rc)
2494 goto init_cpu_err;
2495
Michael Chand43584c2006-11-19 14:14:35 -08002496 /* Initialize the Command Processor. */
2497 cpu_reg.mode = BNX2_CP_CPU_MODE;
2498 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2499 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2500 cpu_reg.state = BNX2_CP_CPU_STATE;
2501 cpu_reg.state_value_clear = 0xffffff;
2502 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2503 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2504 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2505 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2506 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2507 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2508 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002509
Michael Chand43584c2006-11-19 14:14:35 -08002510 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2511 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002512
Michael Chand43584c2006-11-19 14:14:35 -08002513 load_cpu_fw(bp, &cpu_reg, fw);
2514 if (rc)
2515 goto init_cpu_err;
2516 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002517init_cpu_err:
2518 bnx2_gunzip_end(bp);
2519 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002520}
2521
/* Transition the device between PCI power states.  D0 wakes the chip
 * (with the mandatory delay out of D3hot); D3hot optionally arms
 * Wake-on-LAN (forcing 10/100 copper autoneg, enabling magic/ACPI
 * packet reception and the PME bit) before cutting power.  Any other
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and turn off
		 * magic-packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link can
			 * come up at a speed usable while suspended, then
			 * restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rules: broadcast + multicast accepted. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1 only enter D3hot (state value 3) when WOL
		 * is armed; all other chips always enter D3hot.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2648
2649static int
2650bnx2_acquire_nvram_lock(struct bnx2 *bp)
2651{
2652 u32 val;
2653 int j;
2654
2655 /* Request access to the flash interface. */
2656 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2657 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2658 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2659 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2660 break;
2661
2662 udelay(5);
2663 }
2664
2665 if (j >= NVRAM_TIMEOUT_COUNT)
2666 return -EBUSY;
2667
2668 return 0;
2669}
2670
2671static int
2672bnx2_release_nvram_lock(struct bnx2 *bp)
2673{
2674 int j;
2675 u32 val;
2676
2677 /* Relinquish nvram interface. */
2678 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2679
2680 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2681 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2682 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2683 break;
2684
2685 udelay(5);
2686 }
2687
2688 if (j >= NVRAM_TIMEOUT_COUNT)
2689 return -EBUSY;
2690
2691 return 0;
2692}
2693
2694
2695static int
2696bnx2_enable_nvram_write(struct bnx2 *bp)
2697{
2698 u32 val;
2699
2700 val = REG_RD(bp, BNX2_MISC_CFG);
2701 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2702
2703 if (!bp->flash_info->buffered) {
2704 int j;
2705
2706 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2707 REG_WR(bp, BNX2_NVM_COMMAND,
2708 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2709
2710 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2711 udelay(5);
2712
2713 val = REG_RD(bp, BNX2_NVM_COMMAND);
2714 if (val & BNX2_NVM_COMMAND_DONE)
2715 break;
2716 }
2717
2718 if (j >= NVRAM_TIMEOUT_COUNT)
2719 return -EBUSY;
2720 }
2721 return 0;
2722}
2723
2724static void
2725bnx2_disable_nvram_write(struct bnx2 *bp)
2726{
2727 u32 val;
2728
2729 val = REG_RD(bp, BNX2_MISC_CFG);
2730 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2731}
2732
2733
2734static void
2735bnx2_enable_nvram_access(struct bnx2 *bp)
2736{
2737 u32 val;
2738
2739 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2740 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002741 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002742 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2743}
2744
2745static void
2746bnx2_disable_nvram_access(struct bnx2 *bp)
2747{
2748 u32 val;
2749
2750 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2751 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002752 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002753 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2754 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2755}
2756
/* Erase one flash page at the given offset.  A no-op for buffered
 * flash.  Returns 0 on success or -EBUSY if the erase command does
 * not complete within the polling budget.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2796
/* Read one 32-bit word from NVRAM at the given byte offset into
 * ret_val (stored big-endian-converted, i.e. as raw flash bytes).
 * cmd_flags carries FIRST/LAST framing bits supplied by the caller.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by page number + offset in page. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Convert so the caller sees flash byte order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2842
2843
/* Write one 32-bit word (4 raw bytes from val) to NVRAM at the given
 * byte offset.  cmd_flags carries FIRST/LAST framing bits supplied by
 * the caller.  Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by page number + offset in page. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Convert the caller's raw bytes to the register's byte order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2887
/* Identify the attached flash/EEPROM part from the NVM_CFG1 strapping,
 * record it in bp->flash_info, reconfigure the flash interface if it
 * has not been set up yet, and determine the usable flash size.
 * Returns 0 on success, -ENODEV for an unknown part, or the error from
 * acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* Bit 30 of NVM_CFG1 indicates the interface was already
	 * reconfigured (e.g. by the boot code).
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared firmware config;
	 * fall back to the table's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2964}
2965
/* Read buf_size bytes of NVRAM starting at byte offset 'offset' into
 * ret_buf.  NVRAM is accessed one 32-bit word at a time, so an
 * unaligned head and/or tail is read through a 4-byte bounce buffer
 * and only the requested bytes are copied out.
 *
 * Returns 0 on success or a negative errno from the lock/read helpers.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		/* Unaligned start: read the containing dword and copy
		 * out only its trailing pre_len bytes. */
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* The whole request fits inside this first dword. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Round the remaining length up to a whole dword; the
		 * 'extra' pad bytes of the final dword are discarded. */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* Nonzero cmd_flags means FIRST was already issued by
		 * the unaligned-head read above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle words go straight into ret_buf. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Last dword goes through the bounce buffer so the
		 * 'extra' pad bytes are not written past ret_buf. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3075
3076static int
3077bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3078 int buf_size)
3079{
3080 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07003081 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003082 int rc = 0;
3083 int align_start, align_end;
3084
3085 buf = data_buf;
3086 offset32 = offset;
3087 len32 = buf_size;
3088 align_start = align_end = 0;
3089
3090 if ((align_start = (offset32 & 3))) {
3091 offset32 &= ~3;
3092 len32 += align_start;
3093 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3094 return rc;
3095 }
3096
3097 if (len32 & 3) {
3098 if ((len32 > 4) || !align_start) {
3099 align_end = 4 - (len32 & 3);
3100 len32 += align_end;
3101 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3102 end, 4))) {
3103 return rc;
3104 }
3105 }
3106 }
3107
3108 if (align_start || align_end) {
3109 buf = kmalloc(len32, GFP_KERNEL);
3110 if (buf == 0)
3111 return -ENOMEM;
3112 if (align_start) {
3113 memcpy(buf, start, 4);
3114 }
3115 if (align_end) {
3116 memcpy(buf + len32 - 4, end, 4);
3117 }
3118 memcpy(buf + align_start, data_buf, buf_size);
3119 }
3120
Michael Chanae181bc2006-05-22 16:39:20 -07003121 if (bp->flash_info->buffered == 0) {
3122 flash_buffer = kmalloc(264, GFP_KERNEL);
3123 if (flash_buffer == NULL) {
3124 rc = -ENOMEM;
3125 goto nvram_write_end;
3126 }
3127 }
3128
Michael Chanb6016b72005-05-26 13:03:09 -07003129 written = 0;
3130 while ((written < len32) && (rc == 0)) {
3131 u32 page_start, page_end, data_start, data_end;
3132 u32 addr, cmd_flags;
3133 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003134
3135 /* Find the page_start addr */
3136 page_start = offset32 + written;
3137 page_start -= (page_start % bp->flash_info->page_size);
3138 /* Find the page_end addr */
3139 page_end = page_start + bp->flash_info->page_size;
3140 /* Find the data_start addr */
3141 data_start = (written == 0) ? offset32 : page_start;
3142 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003143 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003144 (offset32 + len32) : page_end;
3145
3146 /* Request access to the flash interface. */
3147 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3148 goto nvram_write_end;
3149
3150 /* Enable access to flash interface */
3151 bnx2_enable_nvram_access(bp);
3152
3153 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3154 if (bp->flash_info->buffered == 0) {
3155 int j;
3156
3157 /* Read the whole page into the buffer
3158 * (non-buffer flash only) */
3159 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3160 if (j == (bp->flash_info->page_size - 4)) {
3161 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3162 }
3163 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003164 page_start + j,
3165 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003166 cmd_flags);
3167
3168 if (rc)
3169 goto nvram_write_end;
3170
3171 cmd_flags = 0;
3172 }
3173 }
3174
3175 /* Enable writes to flash interface (unlock write-protect) */
3176 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3177 goto nvram_write_end;
3178
3179 /* Erase the page */
3180 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3181 goto nvram_write_end;
3182
3183 /* Re-enable the write again for the actual write */
3184 bnx2_enable_nvram_write(bp);
3185
3186 /* Loop to write back the buffer data from page_start to
3187 * data_start */
3188 i = 0;
3189 if (bp->flash_info->buffered == 0) {
3190 for (addr = page_start; addr < data_start;
3191 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003192
Michael Chanb6016b72005-05-26 13:03:09 -07003193 rc = bnx2_nvram_write_dword(bp, addr,
3194 &flash_buffer[i], cmd_flags);
3195
3196 if (rc != 0)
3197 goto nvram_write_end;
3198
3199 cmd_flags = 0;
3200 }
3201 }
3202
3203 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003204 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003205 if ((addr == page_end - 4) ||
3206 ((bp->flash_info->buffered) &&
3207 (addr == data_end - 4))) {
3208
3209 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3210 }
3211 rc = bnx2_nvram_write_dword(bp, addr, buf,
3212 cmd_flags);
3213
3214 if (rc != 0)
3215 goto nvram_write_end;
3216
3217 cmd_flags = 0;
3218 buf += 4;
3219 }
3220
3221 /* Loop to write back the buffer data from data_end
3222 * to page_end */
3223 if (bp->flash_info->buffered == 0) {
3224 for (addr = data_end; addr < page_end;
3225 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003226
Michael Chanb6016b72005-05-26 13:03:09 -07003227 if (addr == page_end-4) {
3228 cmd_flags = BNX2_NVM_COMMAND_LAST;
3229 }
3230 rc = bnx2_nvram_write_dword(bp, addr,
3231 &flash_buffer[i], cmd_flags);
3232
3233 if (rc != 0)
3234 goto nvram_write_end;
3235
3236 cmd_flags = 0;
3237 }
3238 }
3239
3240 /* Disable writes to flash interface (lock write-protect) */
3241 bnx2_disable_nvram_write(bp);
3242
3243 /* Disable access to flash interface */
3244 bnx2_disable_nvram_access(bp);
3245 bnx2_release_nvram_lock(bp);
3246
3247 /* Increment written */
3248 written += data_end - data_start;
3249 }
3250
3251nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003252 if (bp->flash_info->buffered == 0)
3253 kfree(flash_buffer);
3254
Michael Chanb6016b72005-05-26 13:03:09 -07003255 if (align_start || align_end)
3256 kfree(buf);
3257 return rc;
3258}
3259
3260static int
3261bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3262{
3263 u32 val;
3264 int i, rc = 0;
3265
3266 /* Wait for the current PCI transaction to complete before
3267 * issuing a reset. */
3268 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3269 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3270 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3271 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3272 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3273 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3274 udelay(5);
3275
Michael Chanb090ae22006-01-23 16:07:10 -08003276 /* Wait for the firmware to tell us it is ok to issue a reset. */
3277 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3278
Michael Chanb6016b72005-05-26 13:03:09 -07003279 /* Deposit a driver reset signature so the firmware knows that
3280 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003281 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003282 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3283
Michael Chanb6016b72005-05-26 13:03:09 -07003284 /* Do a dummy read to force the chip to complete all current transaction
3285 * before we issue a reset. */
3286 val = REG_RD(bp, BNX2_MISC_ID);
3287
Michael Chan234754d2006-11-19 14:11:41 -08003288 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3289 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3290 REG_RD(bp, BNX2_MISC_COMMAND);
3291 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003292
Michael Chan234754d2006-11-19 14:11:41 -08003293 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3294 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003295
Michael Chan234754d2006-11-19 14:11:41 -08003296 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003297
Michael Chan234754d2006-11-19 14:11:41 -08003298 } else {
3299 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3300 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3301 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3302
3303 /* Chip reset. */
3304 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3305
3306 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3307 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3308 current->state = TASK_UNINTERRUPTIBLE;
3309 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003310 }
Michael Chanb6016b72005-05-26 13:03:09 -07003311
Michael Chan234754d2006-11-19 14:11:41 -08003312 /* Reset takes approximate 30 usec */
3313 for (i = 0; i < 10; i++) {
3314 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3315 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3316 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3317 break;
3318 udelay(10);
3319 }
3320
3321 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3322 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3323 printk(KERN_ERR PFX "Chip reset did not complete\n");
3324 return -EBUSY;
3325 }
Michael Chanb6016b72005-05-26 13:03:09 -07003326 }
3327
3328 /* Make sure byte swapping is properly configured. */
3329 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3330 if (val != 0x01020304) {
3331 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3332 return -ENODEV;
3333 }
3334
Michael Chanb6016b72005-05-26 13:03:09 -07003335 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003336 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3337 if (rc)
3338 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003339
3340 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3341 /* Adjust the voltage regular to two steps lower. The default
3342 * of this register is 0x0000000e. */
3343 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3344
3345 /* Remove bad rbuf memory from the free pool. */
3346 rc = bnx2_alloc_bad_rbuf(bp);
3347 }
3348
3349 return rc;
3350}
3351
/* Bring the chip from post-reset state to operational: program DMA
 * byte swapping, context memory, the internal RISC CPUs, MAC address,
 * MTU, host coalescing parameters and the RX filter, then complete
 * the PORT init handshake with the firmware and enable the internal
 * blocks.
 *
 * Returns 0 on success or a negative errno from CPU init / fw sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration: swap modes plus read/write channel
	 * counts (bits 12-15 / 16-19). */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tweak for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single-DMA mode in the TX DMA block. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the relaxed-ordering (ERO) bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: kernel bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip counts and timers: low 16 bits apply
	 * normally, high 16 bits while in interrupt. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether firmware-managed ASF is enabled for this port. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final firmware handshake for this init stage. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining internal blocks. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host coalescing command register value. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3516
/* Program the L2 TX context for connection ID 'cid': context type,
 * command type (with the BD offset in bits 16+), and the bus address
 * of the TX BD chain.  The 5709 uses a different set of context
 * offsets (the _XI variants) than earlier chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* High and low halves of the TX descriptor ring bus address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
Michael Chanb6016b72005-05-26 13:03:09 -07003545
/* Initialize TX ring software state: set up the chain BD that links
 * the end of the ring back to its start, reset producer/consumer
 * indices, record the doorbell mailbox addresses, and program the TX
 * context for TX_CID. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the TX queue when half of the ring is free. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD is a chain BD pointing back to the first BD. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Mailbox addresses used to ring the TX doorbell. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3570
/* Initialize the RX BD rings: chain the ring pages together, program
 * the RX context with the address of page 0, fill the ring with
 * receive skbs, and publish the initial producer index and byte
 * sequence to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page chains to the next page,
		 * wrapping from the last page back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* RX context: BD-chain type plus the bus address of page 0. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post receive buffers; stop early on allocation failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3630
3631static void
Michael Chan13daffa2006-03-20 17:49:20 -08003632bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3633{
3634 u32 num_rings, max;
3635
3636 bp->rx_ring_size = size;
3637 num_rings = 1;
3638 while (size > MAX_RX_DESC_CNT) {
3639 size -= MAX_RX_DESC_CNT;
3640 num_rings++;
3641 }
3642 /* round to next power of 2 */
3643 max = MAX_RX_RINGS;
3644 while ((max & num_rings) == 0)
3645 max >>= 1;
3646
3647 if (num_rings != max)
3648 max <<= 1;
3649
3650 bp->rx_max_ring = max;
3651 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3652}
3653
3654static void
Michael Chanb6016b72005-05-26 13:03:09 -07003655bnx2_free_tx_skbs(struct bnx2 *bp)
3656{
3657 int i;
3658
3659 if (bp->tx_buf_ring == NULL)
3660 return;
3661
3662 for (i = 0; i < TX_DESC_CNT; ) {
3663 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3664 struct sk_buff *skb = tx_buf->skb;
3665 int j, last;
3666
3667 if (skb == NULL) {
3668 i++;
3669 continue;
3670 }
3671
3672 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3673 skb_headlen(skb), PCI_DMA_TODEVICE);
3674
3675 tx_buf->skb = NULL;
3676
3677 last = skb_shinfo(skb)->nr_frags;
3678 for (j = 0; j < last; j++) {
3679 tx_buf = &bp->tx_buf_ring[i + j + 1];
3680 pci_unmap_page(bp->pdev,
3681 pci_unmap_addr(tx_buf, mapping),
3682 skb_shinfo(skb)->frags[j].size,
3683 PCI_DMA_TODEVICE);
3684 }
Michael Chan745720e2006-06-29 12:37:41 -07003685 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003686 i += j + 1;
3687 }
3688
3689}
3690
3691static void
3692bnx2_free_rx_skbs(struct bnx2 *bp)
3693{
3694 int i;
3695
3696 if (bp->rx_buf_ring == NULL)
3697 return;
3698
Michael Chan13daffa2006-03-20 17:49:20 -08003699 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003700 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3701 struct sk_buff *skb = rx_buf->skb;
3702
Michael Chan05d0f1c2005-11-04 08:53:48 -08003703 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003704 continue;
3705
3706 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3707 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3708
3709 rx_buf->skb = NULL;
3710
Michael Chan745720e2006-06-29 12:37:41 -07003711 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003712 }
3713}
3714
/* Free all skbs posted to both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3721
/* Reset the chip with the given firmware reset_code, then reinitialize
 * the chip and the TX/RX rings.  Ring skbs are freed even when the
 * chip reset itself fails.  Returns 0 or a negative errno. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3739
/* Full NIC initialization: reset and reinitialize the chip and rings,
 * then bring up the PHY (under phy_lock) and update the link state.
 * Returns 0 or a negative errno from the reset path. */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
3754
/* Ethtool self-test: for each register in reg_tbl, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases, including on failure.
 *
 * Returns 0 if all registers behave as expected, -ENODEV on the first
 * mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* Table of { register offset, flags (unused), writable-bit mask,
	 * read-only-bit mask }, terminated by offset 0xffff. */
	static const struct {
		u16 offset;
		u16 flags;
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: r/w bits must read back 0, r/o bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: r/w bits must read back 1, r/o bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3917
3918static int
3919bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3920{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003921 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003922 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3923 int i;
3924
3925 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3926 u32 offset;
3927
3928 for (offset = 0; offset < size; offset += 4) {
3929
3930 REG_WR_IND(bp, start + offset, test_pattern[i]);
3931
3932 if (REG_RD_IND(bp, start + offset) !=
3933 test_pattern[i]) {
3934 return -ENODEV;
3935 }
3936 }
3937 }
3938 return 0;
3939}
3940
3941static int
3942bnx2_test_memory(struct bnx2 *bp)
3943{
3944 int ret = 0;
3945 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003946 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003947 u32 offset;
3948 u32 len;
3949 } mem_tbl[] = {
3950 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003951 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003952 { 0xe0000, 0x4000 },
3953 { 0x120000, 0x4000 },
3954 { 0x1a0000, 0x4000 },
3955 { 0x160000, 0x4000 },
3956 { 0xffffffff, 0 },
3957 };
3958
3959 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3960 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3961 mem_tbl[i].len)) != 0) {
3962 return ret;
3963 }
3964 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003965
Michael Chanb6016b72005-05-26 13:03:09 -07003966 return ret;
3967}
3968
/* Loopback self-test modes. */
#define BNX2_MAC_LOOPBACK 0
#define BNX2_PHY_LOOPBACK 1

/* Send one self-addressed frame with the chip looped back at either the
 * MAC or the PHY, and verify the frame comes back intact on the rx ring.
 * Called from the ethtool self-test path with the device quiesced.
 *
 * Returns 0 when the frame is transmitted, received and its payload
 * verifies; -EINVAL for an unknown loopback_mode; -ENOMEM if the test
 * skb cannot be allocated; -ENODEV on any tx/rx/verify failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-size test frame: our own MAC as destination,
	 * zeroed source/type area, then an incrementing byte pattern
	 * that the receive side can verify.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post a single tx descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index and byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced status update picks up the tx completion and
	 * the looped-back rx frame.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx consumer must have caught up with our producer. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on the rx ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2 frame header precedes the packet data in the buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any error bit in the frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length (minus 4-byte CRC) must match what was sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4090
Michael Chanbc5a0692006-01-23 16:13:22 -08004091#define BNX2_MAC_LOOPBACK_FAILED 1
4092#define BNX2_PHY_LOOPBACK_FAILED 2
4093#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4094 BNX2_PHY_LOOPBACK_FAILED)
4095
4096static int
4097bnx2_test_loopback(struct bnx2 *bp)
4098{
4099 int rc = 0;
4100
4101 if (!netif_running(bp->dev))
4102 return BNX2_LOOPBACK_FAILED;
4103
4104 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4105 spin_lock_bh(&bp->phy_lock);
4106 bnx2_init_phy(bp);
4107 spin_unlock_bh(&bp->phy_lock);
4108 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4109 rc |= BNX2_MAC_LOOPBACK_FAILED;
4110 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4111 rc |= BNX2_PHY_LOOPBACK_FAILED;
4112 return rc;
4113}
4114
Michael Chanb6016b72005-05-26 13:03:09 -07004115#define NVRAM_SIZE 0x200
4116#define CRC32_RESIDUAL 0xdebb20e3
4117
4118static int
4119bnx2_test_nvram(struct bnx2 *bp)
4120{
4121 u32 buf[NVRAM_SIZE / 4];
4122 u8 *data = (u8 *) buf;
4123 int rc = 0;
4124 u32 magic, csum;
4125
4126 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4127 goto test_nvram_done;
4128
4129 magic = be32_to_cpu(buf[0]);
4130 if (magic != 0x669955aa) {
4131 rc = -ENODEV;
4132 goto test_nvram_done;
4133 }
4134
4135 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4136 goto test_nvram_done;
4137
4138 csum = ether_crc_le(0x100, data);
4139 if (csum != CRC32_RESIDUAL) {
4140 rc = -ENODEV;
4141 goto test_nvram_done;
4142 }
4143
4144 csum = ether_crc_le(0x100, data + 0x100);
4145 if (csum != CRC32_RESIDUAL) {
4146 rc = -ENODEV;
4147 }
4148
4149test_nvram_done:
4150 return rc;
4151}
4152
4153static int
4154bnx2_test_link(struct bnx2 *bp)
4155{
4156 u32 bmsr;
4157
Michael Chanc770a652005-08-25 15:38:39 -07004158 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004159 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4160 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004161 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004162
Michael Chanb6016b72005-05-26 13:03:09 -07004163 if (bmsr & BMSR_LSTATUS) {
4164 return 0;
4165 }
4166 return -ENODEV;
4167}
4168
4169static int
4170bnx2_test_intr(struct bnx2 *bp)
4171{
4172 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004173 u16 status_idx;
4174
4175 if (!netif_running(bp->dev))
4176 return -ENODEV;
4177
4178 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4179
4180 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004181 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004182 REG_RD(bp, BNX2_HC_COMMAND);
4183
4184 for (i = 0; i < 10; i++) {
4185 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4186 status_idx) {
4187
4188 break;
4189 }
4190
4191 msleep_interruptible(10);
4192 }
4193 if (i < 10)
4194 return 0;
4195
4196 return -ENODEV;
4197}
4198
/* Periodic SerDes maintenance for the 5706, run from bnx2_timer().
 * Implements parallel detection: if autoneg is on but link stays down
 * and the remote side is not sending autoneg CONFIG while signal is
 * detected, force 1Gb full duplex; once a forced link later sees
 * CONFIG from the peer, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Read signal-detect status (shadow reg 0x1c). */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			/* Read the autoneg status twice via expansion
			 * register 0x0f01 (the value is latched).
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Peer isn't autonegotiating: force
				 * 1Gb full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up in forced mode; if the peer now sends
		 * CONFIG, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4253
/* Periodic SerDes maintenance for the 5708, run from bnx2_timer().
 * On 2.5G-capable parts, alternates between autonegotiation and a
 * forced 2.5Gb setting while link is down, to interoperate with peers
 * that do not autonegotiate.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		/* Nothing to do for non-2.5G parts. */
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a previously restarted autoneg time to finish. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg didn't bring link up: try forcing
			 * 2.5Gb full duplex for a shortened interval.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't work either: go back to
			 * autoneg and wait two timer ticks before
			 * re-evaluating.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4288
/* Periodic driver timer: sends the keep-alive pulse to the bootcode,
 * harvests the firmware rx-drop counter, and runs per-chip SerDes
 * link maintenance.  Re-arms itself with bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. reset in progress): skip the
	 * work but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to the bootcode so it knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4316
/* Called with rtnl_lock */
/* net_device open hook: powers the chip up, allocates rings, requests
 * the interrupt (preferring MSI on chips where it works), initializes
 * the NIC, verifies MSI actually delivers interrupts (falling back to
 * INTx if not), and starts the tx queue.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is known broken on 5706 A0/A1; also honor the module
	 * parameter that disables it.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind: IRQ, MSI, rx skbs, ring memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Reinitialize the chip for INTx operation. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4412
/* Deferred chip reset, scheduled from bnx2_tx_timeout().  Stops the
 * netif, reinitializes the NIC and restarts traffic.  The
 * in_reset_task flag is polled by bnx2_close() so it can wait for us
 * without calling flush_scheduled_work() (which could deadlock under
 * rtnl_lock).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Block the ISR until bnx2_netif_start() re-enables ints. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4430
/* net_device tx watchdog hook: a tx queue stall was detected.  Defer
 * the recovery reset to process context via the reset_task workqueue.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4439
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * chip's rx filtering.  Traffic is stopped around the update so the
 * rx path never sees a half-updated configuration.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4454
/* Called with rtnl_lock */
/* VLAN acceleration hook: drop the per-vid device entry and refresh
 * the rx filtering, with traffic stopped around the update.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4469#endif
4470
Herbert Xu932ff272006-06-09 12:20:56 -07004471/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004472 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4473 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004474 */
4475static int
4476bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4477{
Michael Chan972ec0d2006-01-23 16:12:43 -08004478 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004479 dma_addr_t mapping;
4480 struct tx_bd *txbd;
4481 struct sw_bd *tx_buf;
4482 u32 len, vlan_tag_flags, last_frag, mss;
4483 u16 prod, ring_prod;
4484 int i;
4485
Michael Chane89bbf12005-08-25 15:36:58 -07004486 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004487 netif_stop_queue(dev);
4488 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4489 dev->name);
4490
4491 return NETDEV_TX_BUSY;
4492 }
4493 len = skb_headlen(skb);
4494 prod = bp->tx_prod;
4495 ring_prod = TX_RING_IDX(prod);
4496
4497 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004498 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004499 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4500 }
4501
4502 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4503 vlan_tag_flags |=
4504 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4505 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004506#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004507 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004508 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4509 u32 tcp_opt_len, ip_tcp_len;
4510
4511 if (skb_header_cloned(skb) &&
4512 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4513 dev_kfree_skb(skb);
4514 return NETDEV_TX_OK;
4515 }
4516
4517 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4518 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4519
4520 tcp_opt_len = 0;
4521 if (skb->h.th->doff > 5) {
4522 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4523 }
4524 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4525
4526 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004527 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004528 skb->h.th->check =
4529 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4530 skb->nh.iph->daddr,
4531 0, IPPROTO_TCP, 0);
4532
4533 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4534 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4535 (tcp_opt_len >> 2)) << 8;
4536 }
4537 }
4538 else
4539#endif
4540 {
4541 mss = 0;
4542 }
4543
4544 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004545
Michael Chanb6016b72005-05-26 13:03:09 -07004546 tx_buf = &bp->tx_buf_ring[ring_prod];
4547 tx_buf->skb = skb;
4548 pci_unmap_addr_set(tx_buf, mapping, mapping);
4549
4550 txbd = &bp->tx_desc_ring[ring_prod];
4551
4552 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4553 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4554 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4555 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4556
4557 last_frag = skb_shinfo(skb)->nr_frags;
4558
4559 for (i = 0; i < last_frag; i++) {
4560 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4561
4562 prod = NEXT_TX_BD(prod);
4563 ring_prod = TX_RING_IDX(prod);
4564 txbd = &bp->tx_desc_ring[ring_prod];
4565
4566 len = frag->size;
4567 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4568 len, PCI_DMA_TODEVICE);
4569 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4570 mapping, mapping);
4571
4572 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4573 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4574 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4575 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4576
4577 }
4578 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4579
4580 prod = NEXT_TX_BD(prod);
4581 bp->tx_prod_bseq += skb->len;
4582
Michael Chan234754d2006-11-19 14:11:41 -08004583 REG_WR16(bp, bp->tx_bidx_addr, prod);
4584 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004585
4586 mmiowb();
4587
4588 bp->tx_prod = prod;
4589 dev->trans_start = jiffies;
4590
Michael Chane89bbf12005-08-25 15:36:58 -07004591 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004592 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004593 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004594 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004595 }
4596
4597 return NETDEV_TX_OK;
4598}
4599
/* Called with rtnl_lock */
/* net_device stop hook: waits for any in-flight reset_task, stops
 * traffic and the timer, puts the chip into the appropriate suspend
 * state (honoring Wake-on-LAN configuration), and releases the IRQ,
 * skbs and ring memory before dropping to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the WoL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4635
/* Fold a 64-bit hardware counter (split into ctr_hi/ctr_lo words)
 * into an unsigned long.  On 32-bit hosts only the low word fits, so
 * GET_NET_STATS selects the 32-bit variant there.
 */
#define GET_NET_STATS64(ctr)				\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif

/* net_device get_stats hook: translate the DMA'ed hardware statistics
 * block into struct net_device_stats.  Returns the last snapshot (all
 * zero before the first update) if the stats block is not allocated.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is refreshed from firmware by bnx2_timer(). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4724
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings hook: report supported modes, current
 * advertisement, autoneg state and (when link is up) the negotiated
 * speed/duplex.  With link down, speed and duplex are reported as -1.
 * Always returns 0.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* SerDes ports are fibre, 1Gb full duplex only. */
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004773
/* ethtool set_settings hook (called with rtnl_lock): validate the
 * requested autoneg/speed/duplex/advertisement against the port type
 * (copper vs SerDes), commit it to the bnx2 state, and reprogram the
 * PHY.  Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Stage the new configuration in locals so nothing is
	 * committed if validation fails partway through.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* Sub-gigabit rates are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is never supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* Forced SerDes: only 1Gb (or 2.5Gb on capable
			 * parts), full duplex.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1Gb is not supported on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4849
4850static void
4851bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4852{
Michael Chan972ec0d2006-01-23 16:12:43 -08004853 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004854
4855 strcpy(info->driver, DRV_MODULE_NAME);
4856 strcpy(info->version, DRV_MODULE_VERSION);
4857 strcpy(info->bus_info, pci_name(bp->pdev));
4858 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4859 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4860 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004861 info->fw_version[1] = info->fw_version[3] = '.';
4862 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004863}
4864
Michael Chan244ac4f2006-03-20 17:48:46 -08004865#define BNX2_REGDUMP_LEN (32 * 1024)
4866
/* ethtool get_regs_len: size of the buffer bnx2_get_regs() fills
 * (fixed 32 KB register window).
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4872
4873static void
4874bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4875{
4876 u32 *p = _p, i, offset;
4877 u8 *orig_p = _p;
4878 struct bnx2 *bp = netdev_priv(dev);
4879 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4880 0x0800, 0x0880, 0x0c00, 0x0c10,
4881 0x0c30, 0x0d08, 0x1000, 0x101c,
4882 0x1040, 0x1048, 0x1080, 0x10a4,
4883 0x1400, 0x1490, 0x1498, 0x14f0,
4884 0x1500, 0x155c, 0x1580, 0x15dc,
4885 0x1600, 0x1658, 0x1680, 0x16d8,
4886 0x1800, 0x1820, 0x1840, 0x1854,
4887 0x1880, 0x1894, 0x1900, 0x1984,
4888 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4889 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4890 0x2000, 0x2030, 0x23c0, 0x2400,
4891 0x2800, 0x2820, 0x2830, 0x2850,
4892 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4893 0x3c00, 0x3c94, 0x4000, 0x4010,
4894 0x4080, 0x4090, 0x43c0, 0x4458,
4895 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4896 0x4fc0, 0x5010, 0x53c0, 0x5444,
4897 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4898 0x5fc0, 0x6000, 0x6400, 0x6428,
4899 0x6800, 0x6848, 0x684c, 0x6860,
4900 0x6888, 0x6910, 0x8000 };
4901
4902 regs->version = 0;
4903
4904 memset(p, 0, BNX2_REGDUMP_LEN);
4905
4906 if (!netif_running(bp->dev))
4907 return;
4908
4909 i = 0;
4910 offset = reg_boundaries[0];
4911 p += offset;
4912 while (offset < BNX2_REGDUMP_LEN) {
4913 *p++ = REG_RD(bp, offset);
4914 offset += 4;
4915 if (offset == reg_boundaries[i + 1]) {
4916 offset = reg_boundaries[i + 2];
4917 p = (u32 *) (orig_p + offset);
4918 i += 2;
4919 }
4920 }
4921}
4922
Michael Chanb6016b72005-05-26 13:03:09 -07004923static void
4924bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4925{
Michael Chan972ec0d2006-01-23 16:12:43 -08004926 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004927
4928 if (bp->flags & NO_WOL_FLAG) {
4929 wol->supported = 0;
4930 wol->wolopts = 0;
4931 }
4932 else {
4933 wol->supported = WAKE_MAGIC;
4934 if (bp->wol)
4935 wol->wolopts = WAKE_MAGIC;
4936 else
4937 wol->wolopts = 0;
4938 }
4939 memset(&wol->sopass, 0, sizeof(wol->sopass));
4940}
4941
4942static int
4943bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4944{
Michael Chan972ec0d2006-01-23 16:12:43 -08004945 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004946
4947 if (wol->wolopts & ~WAKE_MAGIC)
4948 return -EINVAL;
4949
4950 if (wol->wolopts & WAKE_MAGIC) {
4951 if (bp->flags & NO_WOL_FLAG)
4952 return -EINVAL;
4953
4954 bp->wol = 1;
4955 }
4956 else {
4957 bp->wol = 0;
4958 }
4959 return 0;
4960}
4961
/* ethtool nway_reset: restart autonegotiation on the PHY.
 * Fails with -EINVAL if autoneg is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be called
		 * with a BH-disabled spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the serdes autoneg timeout handling in the
		 * driver's periodic timer.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4996
4997static int
4998bnx2_get_eeprom_len(struct net_device *dev)
4999{
Michael Chan972ec0d2006-01-23 16:12:43 -08005000 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005001
Michael Chan1122db72006-01-23 16:11:42 -08005002 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005003 return 0;
5004
Michael Chan1122db72006-01-23 16:11:42 -08005005 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005006}
5007
5008static int
5009bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5010 u8 *eebuf)
5011{
Michael Chan972ec0d2006-01-23 16:12:43 -08005012 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005013 int rc;
5014
John W. Linville1064e942005-11-10 12:58:24 -08005015 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005016
5017 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5018
5019 return rc;
5020}
5021
5022static int
5023bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5024 u8 *eebuf)
5025{
Michael Chan972ec0d2006-01-23 16:12:43 -08005026 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005027 int rc;
5028
John W. Linville1064e942005-11-10 12:58:24 -08005029 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005030
5031 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5032
5033 return rc;
5034}
5035
5036static int
5037bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5038{
Michael Chan972ec0d2006-01-23 16:12:43 -08005039 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005040
5041 memset(coal, 0, sizeof(struct ethtool_coalesce));
5042
5043 coal->rx_coalesce_usecs = bp->rx_ticks;
5044 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5045 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5046 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5047
5048 coal->tx_coalesce_usecs = bp->tx_ticks;
5049 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5050 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5051 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5052
5053 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5054
5055 return 0;
5056}
5057
5058static int
5059bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5060{
Michael Chan972ec0d2006-01-23 16:12:43 -08005061 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005062
5063 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5064 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5065
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005066 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005067 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5068
5069 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5070 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5071
5072 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5073 if (bp->rx_quick_cons_trip_int > 0xff)
5074 bp->rx_quick_cons_trip_int = 0xff;
5075
5076 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5077 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5078
5079 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5080 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5081
5082 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5083 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5084
5085 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5086 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5087 0xff;
5088
5089 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5090 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5091 bp->stats_ticks &= 0xffff00;
5092
5093 if (netif_running(bp->dev)) {
5094 bnx2_netif_stop(bp);
5095 bnx2_init_nic(bp);
5096 bnx2_netif_start(bp);
5097 }
5098
5099 return 0;
5100}
5101
5102static void
5103bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5104{
Michael Chan972ec0d2006-01-23 16:12:43 -08005105 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005106
Michael Chan13daffa2006-03-20 17:49:20 -08005107 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005108 ering->rx_mini_max_pending = 0;
5109 ering->rx_jumbo_max_pending = 0;
5110
5111 ering->rx_pending = bp->rx_ring_size;
5112 ering->rx_mini_pending = 0;
5113 ering->rx_jumbo_pending = 0;
5114
5115 ering->tx_max_pending = MAX_TX_DESC_CNT;
5116 ering->tx_pending = bp->tx_ring_size;
5117}
5118
/* ethtool set_ringparam: resize the RX and TX rings.
 * The TX ring must hold at least MAX_SKB_FRAGS + 1 descriptors so a
 * maximally-fragmented skb always fits.  If the interface is running,
 * the rings are torn down and rebuilt with the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Quiesce the device and release the old rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings; the caller only sees the error
		 * code.  Recovery appears to require ifdown/ifup -- confirm.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5152
5153static void
5154bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5155{
Michael Chan972ec0d2006-01-23 16:12:43 -08005156 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005157
5158 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5159 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5160 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5161}
5162
5163static int
5164bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5165{
Michael Chan972ec0d2006-01-23 16:12:43 -08005166 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005167
5168 bp->req_flow_ctrl = 0;
5169 if (epause->rx_pause)
5170 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5171 if (epause->tx_pause)
5172 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5173
5174 if (epause->autoneg) {
5175 bp->autoneg |= AUTONEG_FLOW_CTRL;
5176 }
5177 else {
5178 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5179 }
5180
Michael Chanc770a652005-08-25 15:38:39 -07005181 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005182
5183 bnx2_setup_phy(bp);
5184
Michael Chanc770a652005-08-25 15:38:39 -07005185 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005186
5187 return 0;
5188}
5189
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5197
/* ethtool set_rx_csum: enable/disable RX checksum offload.  Only the
 * flag is stored here; the RX path consults bp->rx_csum per packet.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5206
/* ethtool set_tso: toggle TCP segmentation offload (plus TSO with ECN)
 * in the netdevice feature flags.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
5216
#define BNX2_NUM_STATS 46

/* ethtool statistics names (ETH_SS_STATS).  The entry order here must
 * match bnx2_stats_offset_arr[] and the bnx2_570x_stats_len_arr[]
 * tables below, index for index.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5269
/* 32-bit word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware word offset for each entry of bnx2_stats_str_arr[].  For
 * 64-bit counters this is the _hi word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5320
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes for 5706-class chips: 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = counter skipped (reported as zero).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5331
/* Counter widths for 5708-class chips.  Same as the 5706 table except
 * stat_Dot3StatsCarrierSenseErrors (index 11) is valid here; only
 * stat_IfHCInBadOctets (index 1) is still skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5339
#define BNX2_NUM_TESTS 6

/* Self-test names (ETH_SS_TEST).  Order matches the buf[] slots filled
 * in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5352
/* ethtool self_test_count: number of result slots bnx2_self_test()
 * fills in.
 */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5358
/* ethtool self_test: run the diagnostic suite.  buf[i] is nonzero when
 * test i (see bnx2_tests_str_arr) failed.  Offline tests reset the chip
 * into diagnostic mode and therefore disrupt traffic.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation: full reinit if the interface
		 * is up, plain reset otherwise.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7 seconds) so the online link
		 * test below does not fail spuriously after the reset.
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5414
5415static void
5416bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5417{
5418 switch (stringset) {
5419 case ETH_SS_STATS:
5420 memcpy(buf, bnx2_stats_str_arr,
5421 sizeof(bnx2_stats_str_arr));
5422 break;
5423 case ETH_SS_TEST:
5424 memcpy(buf, bnx2_tests_str_arr,
5425 sizeof(bnx2_tests_str_arr));
5426 break;
5427 }
5428}
5429
/* ethtool get_stats_count: number of u64 slots bnx2_get_ethtool_stats()
 * fills in.
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5435
5436static void
5437bnx2_get_ethtool_stats(struct net_device *dev,
5438 struct ethtool_stats *stats, u64 *buf)
5439{
Michael Chan972ec0d2006-01-23 16:12:43 -08005440 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005441 int i;
5442 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005443 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005444
5445 if (hw_stats == NULL) {
5446 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5447 return;
5448 }
5449
Michael Chan5b0c76a2005-11-04 08:45:49 -08005450 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5451 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5452 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5453 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005454 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005455 else
5456 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005457
5458 for (i = 0; i < BNX2_NUM_STATS; i++) {
5459 if (stats_len_arr[i] == 0) {
5460 /* skip this counter */
5461 buf[i] = 0;
5462 continue;
5463 }
5464 if (stats_len_arr[i] == 4) {
5465 /* 4-byte counter */
5466 buf[i] = (u64)
5467 *(hw_stats + bnx2_stats_offset_arr[i]);
5468 continue;
5469 }
5470 /* 8-byte counter */
5471 buf[i] = (((u64) *(hw_stats +
5472 bnx2_stats_offset_arr[i])) << 32) +
5473 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5474 }
5475}
5476
/* ethtool phys_id: blink the port LEDs for @data seconds (default 2)
 * so the physical port can be identified.  Restores the original LED
 * configuration before returning.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs, remembering the old mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two half-second phases per blink: all-off, then all-on. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop blinking early if the user interrupted us. */
		if (signal_pending(current))
			break;
	}
	/* Return LED control to the hardware and restore the old mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5510
/* ethtool operations table, hooked up via SET_ETHTOOL_OPS at probe. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5548
/* Called with rtnl_lock */
/* MII register access ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * PHY reads/writes are serialized with phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5590
5591/* Called with rtnl_lock */
5592static int
5593bnx2_change_mac_addr(struct net_device *dev, void *p)
5594{
5595 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005596 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005597
Michael Chan73eef4c2005-08-25 15:39:15 -07005598 if (!is_valid_ether_addr(addr->sa_data))
5599 return -EINVAL;
5600
Michael Chanb6016b72005-05-26 13:03:09 -07005601 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5602 if (netif_running(dev))
5603 bnx2_set_mac_addr(bp);
5604
5605 return 0;
5606}
5607
5608/* Called with rtnl_lock */
5609static int
5610bnx2_change_mtu(struct net_device *dev, int new_mtu)
5611{
Michael Chan972ec0d2006-01-23 16:12:43 -08005612 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005613
5614 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5615 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5616 return -EINVAL;
5617
5618 dev->mtu = new_mtu;
5619 if (netif_running(dev)) {
5620 bnx2_netif_stop(bp);
5621
5622 bnx2_init_nic(bp);
5623
5624 bnx2_netif_start(bp);
5625 }
5626 return 0;
5627}
5628
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke the interrupt handler directly with the IRQ
 * masked, so netconsole and friends can make progress without normal
 * interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5640
5641static int __devinit
5642bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5643{
5644 struct bnx2 *bp;
5645 unsigned long mem_len;
5646 int rc;
5647 u32 reg;
5648
5649 SET_MODULE_OWNER(dev);
5650 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005651 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005652
5653 bp->flags = 0;
5654 bp->phy_flags = 0;
5655
5656 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5657 rc = pci_enable_device(pdev);
5658 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005659 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005660 goto err_out;
5661 }
5662
5663 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005664 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005665 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005666 rc = -ENODEV;
5667 goto err_out_disable;
5668 }
5669
5670 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5671 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005672 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005673 goto err_out_disable;
5674 }
5675
5676 pci_set_master(pdev);
5677
5678 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5679 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005680 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005681 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005682 rc = -EIO;
5683 goto err_out_release;
5684 }
5685
Michael Chanb6016b72005-05-26 13:03:09 -07005686 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5687 bp->flags |= USING_DAC_FLAG;
5688 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005689 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005690 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005691 rc = -EIO;
5692 goto err_out_release;
5693 }
5694 }
5695 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005696 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005697 rc = -EIO;
5698 goto err_out_release;
5699 }
5700
5701 bp->dev = dev;
5702 bp->pdev = pdev;
5703
5704 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005705 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005706
5707 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005708 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005709 dev->mem_end = dev->mem_start + mem_len;
5710 dev->irq = pdev->irq;
5711
5712 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5713
5714 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005715 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005716 rc = -ENOMEM;
5717 goto err_out_release;
5718 }
5719
5720 /* Configure byte swap and enable write to the reg_window registers.
5721 * Rely on CPU to do target byte swapping on big endian systems
5722 * The chip's target access swapping will not swap all accesses
5723 */
5724 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5725 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5726 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5727
Pavel Machek829ca9a2005-09-03 15:56:56 -07005728 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005729
5730 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5731
Michael Chan59b47d82006-11-19 14:10:45 -08005732 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5733 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5734 if (bp->pcix_cap == 0) {
5735 dev_err(&pdev->dev,
5736 "Cannot find PCIX capability, aborting.\n");
5737 rc = -EIO;
5738 goto err_out_unmap;
5739 }
5740 }
5741
Michael Chanb6016b72005-05-26 13:03:09 -07005742 /* Get bus information. */
5743 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5744 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5745 u32 clkreg;
5746
5747 bp->flags |= PCIX_FLAG;
5748
5749 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005750
Michael Chanb6016b72005-05-26 13:03:09 -07005751 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5752 switch (clkreg) {
5753 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5754 bp->bus_speed_mhz = 133;
5755 break;
5756
5757 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5758 bp->bus_speed_mhz = 100;
5759 break;
5760
5761 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5762 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5763 bp->bus_speed_mhz = 66;
5764 break;
5765
5766 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5767 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5768 bp->bus_speed_mhz = 50;
5769 break;
5770
5771 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5772 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5773 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5774 bp->bus_speed_mhz = 33;
5775 break;
5776 }
5777 }
5778 else {
5779 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5780 bp->bus_speed_mhz = 66;
5781 else
5782 bp->bus_speed_mhz = 33;
5783 }
5784
5785 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5786 bp->flags |= PCI_32BIT_FLAG;
5787
5788 /* 5706A0 may falsely detect SERR and PERR. */
5789 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5790 reg = REG_RD(bp, PCI_COMMAND);
5791 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5792 REG_WR(bp, PCI_COMMAND, reg);
5793 }
5794 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5795 !(bp->flags & PCIX_FLAG)) {
5796
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005797 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005798 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005799 goto err_out_unmap;
5800 }
5801
5802 bnx2_init_nvram(bp);
5803
Michael Chane3648b32005-11-04 08:51:21 -08005804 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5805
5806 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5807 BNX2_SHM_HDR_SIGNATURE_SIG)
5808 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5809 else
5810 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5811
Michael Chanb6016b72005-05-26 13:03:09 -07005812 /* Get the permanent MAC address. First we need to make sure the
5813 * firmware is actually running.
5814 */
Michael Chane3648b32005-11-04 08:51:21 -08005815 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005816
5817 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5818 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005819 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005820 rc = -ENODEV;
5821 goto err_out_unmap;
5822 }
5823
Michael Chane3648b32005-11-04 08:51:21 -08005824 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005825
Michael Chane3648b32005-11-04 08:51:21 -08005826 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005827 bp->mac_addr[0] = (u8) (reg >> 8);
5828 bp->mac_addr[1] = (u8) reg;
5829
Michael Chane3648b32005-11-04 08:51:21 -08005830 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005831 bp->mac_addr[2] = (u8) (reg >> 24);
5832 bp->mac_addr[3] = (u8) (reg >> 16);
5833 bp->mac_addr[4] = (u8) (reg >> 8);
5834 bp->mac_addr[5] = (u8) reg;
5835
5836 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005837 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005838
5839 bp->rx_csum = 1;
5840
5841 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5842
5843 bp->tx_quick_cons_trip_int = 20;
5844 bp->tx_quick_cons_trip = 20;
5845 bp->tx_ticks_int = 80;
5846 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005847
Michael Chanb6016b72005-05-26 13:03:09 -07005848 bp->rx_quick_cons_trip_int = 6;
5849 bp->rx_quick_cons_trip = 6;
5850 bp->rx_ticks_int = 18;
5851 bp->rx_ticks = 18;
5852
5853 bp->stats_ticks = 1000000 & 0xffff00;
5854
5855 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005856 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005857
Michael Chan5b0c76a2005-11-04 08:45:49 -08005858 bp->phy_addr = 1;
5859
Michael Chanb6016b72005-05-26 13:03:09 -07005860 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chanbac0dff2006-11-19 14:15:05 -08005861 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5862 if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5863 bp->phy_flags |= PHY_SERDES_FLAG;
5864 } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005865 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005866
5867 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005868 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005869 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005870 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005871 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005872 BNX2_SHARED_HW_CFG_CONFIG);
5873 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5874 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5875 }
Michael Chanb6016b72005-05-26 13:03:09 -07005876 }
5877
Michael Chan16088272006-06-12 22:16:43 -07005878 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5879 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5880 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005881 bp->flags |= NO_WOL_FLAG;
5882
Michael Chanb6016b72005-05-26 13:03:09 -07005883 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5884 bp->tx_quick_cons_trip_int =
5885 bp->tx_quick_cons_trip;
5886 bp->tx_ticks_int = bp->tx_ticks;
5887 bp->rx_quick_cons_trip_int =
5888 bp->rx_quick_cons_trip;
5889 bp->rx_ticks_int = bp->rx_ticks;
5890 bp->comp_prod_trip_int = bp->comp_prod_trip;
5891 bp->com_ticks_int = bp->com_ticks;
5892 bp->cmd_ticks_int = bp->cmd_ticks;
5893 }
5894
Michael Chanf9317a42006-09-29 17:06:23 -07005895 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5896 *
5897 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5898 * with byte enables disabled on the unused 32-bit word. This is legal
5899 * but causes problems on the AMD 8132 which will eventually stop
5900 * responding after a while.
5901 *
5902 * AMD believes this incompatibility is unique to the 5706, and
5903 * prefers to locally disable MSI rather than globally disabling it
5904 * using pci_msi_quirk.
5905 */
5906 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5907 struct pci_dev *amd_8132 = NULL;
5908
5909 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5910 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5911 amd_8132))) {
5912 u8 rev;
5913
5914 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5915 if (rev >= 0x10 && rev <= 0x13) {
5916 disable_msi = 1;
5917 pci_dev_put(amd_8132);
5918 break;
5919 }
5920 }
5921 }
5922
Michael Chanb6016b72005-05-26 13:03:09 -07005923 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5924 bp->req_line_speed = 0;
5925 if (bp->phy_flags & PHY_SERDES_FLAG) {
5926 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005927
Michael Chane3648b32005-11-04 08:51:21 -08005928 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005929 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5930 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5931 bp->autoneg = 0;
5932 bp->req_line_speed = bp->line_speed = SPEED_1000;
5933 bp->req_duplex = DUPLEX_FULL;
5934 }
Michael Chanb6016b72005-05-26 13:03:09 -07005935 }
5936 else {
5937 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5938 }
5939
5940 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5941
Michael Chancd339a02005-08-25 15:35:24 -07005942 init_timer(&bp->timer);
5943 bp->timer.expires = RUN_AT(bp->timer_interval);
5944 bp->timer.data = (unsigned long) bp;
5945 bp->timer.function = bnx2_timer;
5946
Michael Chanb6016b72005-05-26 13:03:09 -07005947 return 0;
5948
5949err_out_unmap:
5950 if (bp->regview) {
5951 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005952 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005953 }
5954
5955err_out_release:
5956 pci_release_regions(pdev);
5957
5958err_out_disable:
5959 pci_disable_device(pdev);
5960 pci_set_drvdata(pdev, NULL);
5961
5962err_out:
5963 return rc;
5964}
5965
5966static int __devinit
5967bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5968{
5969 static int version_printed = 0;
5970 struct net_device *dev = NULL;
5971 struct bnx2 *bp;
5972 int rc, i;
5973
5974 if (version_printed++ == 0)
5975 printk(KERN_INFO "%s", version);
5976
5977 /* dev zeroed in init_etherdev */
5978 dev = alloc_etherdev(sizeof(*bp));
5979
5980 if (!dev)
5981 return -ENOMEM;
5982
5983 rc = bnx2_init_board(pdev, dev);
5984 if (rc < 0) {
5985 free_netdev(dev);
5986 return rc;
5987 }
5988
5989 dev->open = bnx2_open;
5990 dev->hard_start_xmit = bnx2_start_xmit;
5991 dev->stop = bnx2_close;
5992 dev->get_stats = bnx2_get_stats;
5993 dev->set_multicast_list = bnx2_set_rx_mode;
5994 dev->do_ioctl = bnx2_ioctl;
5995 dev->set_mac_address = bnx2_change_mac_addr;
5996 dev->change_mtu = bnx2_change_mtu;
5997 dev->tx_timeout = bnx2_tx_timeout;
5998 dev->watchdog_timeo = TX_TIMEOUT;
5999#ifdef BCM_VLAN
6000 dev->vlan_rx_register = bnx2_vlan_rx_register;
6001 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6002#endif
6003 dev->poll = bnx2_poll;
6004 dev->ethtool_ops = &bnx2_ethtool_ops;
6005 dev->weight = 64;
6006
Michael Chan972ec0d2006-01-23 16:12:43 -08006007 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006008
6009#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6010 dev->poll_controller = poll_bnx2;
6011#endif
6012
6013 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006014 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006015 if (bp->regview)
6016 iounmap(bp->regview);
6017 pci_release_regions(pdev);
6018 pci_disable_device(pdev);
6019 pci_set_drvdata(pdev, NULL);
6020 free_netdev(dev);
6021 return rc;
6022 }
6023
6024 pci_set_drvdata(pdev, dev);
6025
6026 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07006027 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07006028 bp->name = board_info[ent->driver_data].name,
6029 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6030 "IRQ %d, ",
6031 dev->name,
6032 bp->name,
6033 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6034 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6035 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6036 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6037 bp->bus_speed_mhz,
6038 dev->base_addr,
6039 bp->pdev->irq);
6040
6041 printk("node addr ");
6042 for (i = 0; i < 6; i++)
6043 printk("%2.2x", dev->dev_addr[i]);
6044 printk("\n");
6045
6046 dev->features |= NETIF_F_SG;
6047 if (bp->flags & USING_DAC_FLAG)
6048 dev->features |= NETIF_F_HIGHDMA;
6049 dev->features |= NETIF_F_IP_CSUM;
6050#ifdef BCM_VLAN
6051 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6052#endif
6053#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006054 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006055#endif
6056
6057 netif_carrier_off(bp->dev);
6058
6059 return 0;
6060}
6061
6062static void __devexit
6063bnx2_remove_one(struct pci_dev *pdev)
6064{
6065 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006066 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006067
Michael Chanafdc08b2005-08-25 15:34:29 -07006068 flush_scheduled_work();
6069
Michael Chanb6016b72005-05-26 13:03:09 -07006070 unregister_netdev(dev);
6071
6072 if (bp->regview)
6073 iounmap(bp->regview);
6074
6075 free_netdev(dev);
6076 pci_release_regions(pdev);
6077 pci_disable_device(pdev);
6078 pci_set_drvdata(pdev, NULL);
6079}
6080
6081static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006082bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006083{
6084 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006085 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006086 u32 reset_code;
6087
6088 if (!netif_running(dev))
6089 return 0;
6090
Michael Chan1d60290f2006-03-20 17:50:08 -08006091 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006092 bnx2_netif_stop(bp);
6093 netif_device_detach(dev);
6094 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006095 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006096 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006097 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006098 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6099 else
6100 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6101 bnx2_reset_chip(bp, reset_code);
6102 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006103 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006104 return 0;
6105}
6106
6107static int
6108bnx2_resume(struct pci_dev *pdev)
6109{
6110 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006111 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006112
6113 if (!netif_running(dev))
6114 return 0;
6115
Pavel Machek829ca9a2005-09-03 15:56:56 -07006116 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006117 netif_device_attach(dev);
6118 bnx2_init_nic(bp);
6119 bnx2_netif_start(bp);
6120 return 0;
6121}
6122
/* PCI driver descriptor: binds the probe/remove and power-management
 * entry points above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6131
/* Module entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6136
/* Module exit point: unregister the PCI driver (removes all devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6141
/* Hook the init/exit functions into the module load/unload path. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6144
6145
6146