/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

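/* Return the number of free TX descriptors.  The memory barrier makes
 * sure up-to-date tx_prod and tx_cons values are read, so the caller
 * does not decide the ring is full based on stale indices.
 */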
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

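/* Indirect register access: the target offset is first written to the
 * PCICFG register window address, then the data is read or written
 * through the window register.
 */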
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}

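/* Write one word of context memory for the given connection ID.  On the
 * 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL mailbox and
 * is polled until the WRITE_REQ bit clears; older chips write directly
 * through CTX_DATA_ADR/CTX_DATA.
 */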
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}

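/* MDIO access to the PHY through the EMAC.  If the PHY is normally
 * auto-polled, auto-polling is turned off around the transaction and
 * re-enabled afterwards.  The START_BUSY bit is polled until the serial
 * shift completes; -EBUSY is returned on timeout.
 */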
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

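/* bnx2_netif_stop()/bnx2_netif_start() bracket operations such as chip
 * resets: interrupts are masked and synchronized, NAPI polling and the
 * TX queue are stopped, and everything is re-enabled only when the
 * intr_sem nesting count drops back to zero.
 */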
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

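/* Tell the bootcode firmware about the current link state by writing a
 * BNX2_LINK_STATUS_* value (speed/duplex and autoneg result) into shared
 * memory.
 */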
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

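/* Resolve the flow control (pause) settings to use.  When both speed and
 * flow control are autonegotiated, the result follows the local/remote
 * pause advertisement rules of IEEE 802.3 Table 28B-3; on 5708 SerDes the
 * resolved result is read back directly from the 1000X_STAT1 register.
 */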
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

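/* The bnx2_*_linkup() helpers below decode the negotiated (or forced)
 * speed and duplex for each PHY type once link is reported up.
 */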
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

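/* Program the EMAC for the link that was just resolved: port mode
 * (MII/GMII/2.5G), half/full duplex, and the RX/TX PAUSE enables, then
 * acknowledge the link-change interrupt.
 */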
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

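/* On 5709 SerDes PHYs the link status polled by bnx2_set_link() lives in
 * the GP_STATUS block, so the block address register has to be switched
 * to that block before reading mii_bmsr1 and restored afterwards.
 */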
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

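/* Force (or stop forcing) 2.5 Gbps operation on 2.5G-capable SerDes PHYs.
 * The 5709 uses the SERDES_DIG_MISC1 register for this, while the 5708
 * uses a force bit in its BMCR; both paths finish with a BMCR write so
 * the change takes effect.
 */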
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

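/* Central link-state handler.  Reads the (latched) BMSR twice to get the
 * current state, lets the chip-specific linkup helper decode speed and
 * duplex, resolves flow control, reprograms the MAC, and reports any
 * change to the log and to the firmware.
 */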
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

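/* Translate the requested flow-control settings into pause advertisement
 * bits, using the 1000Base-X encoding for SerDes PHYs and the copper
 * encoding otherwise.
 */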
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

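/* Configure the SerDes PHY.  With autoneg disabled, speed and duplex are
 * forced (dropping the link first when needed so the partner notices the
 * change); with autoneg enabled, the advertisement registers are
 * programmed and autonegotiation is restarted.
 */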
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

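/* Copper counterpart of the SerDes setup: program the 10/100/1000 and
 * pause advertisements and restart autonegotiation, or force the
 * requested speed/duplex (forcing the link down first if it is up so the
 * partner re-trains).
 */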
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

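/* One-time init of the 5709 SerDes PHY.  This PHY maps the standard MII
 * registers at an offset of 0x10 and exposes its vendor registers through
 * the BLK_ADDR paging register, so the mii_* register offsets in the
 * bnx2 structure are overridden here first.
 */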
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

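/* Copper PHY init: apply the CRC workaround sequence and the early-DAC
 * disable where the corresponding phy_flags are set, program the extended
 * packet length bit according to the MTU, and enable ethernet@wirespeed.
 */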
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}


1637bnx2_init_phy(struct bnx2 *bp)
1638{
1639 u32 val;
1640 int rc = 0;
1641
1642 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1643 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1644
Michael Chanca58c3a2007-05-03 13:22:52 -07001645 bp->mii_bmcr = MII_BMCR;
1646 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001647 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001648 bp->mii_adv = MII_ADVERTISE;
1649 bp->mii_lpa = MII_LPA;
1650
Michael Chanb6016b72005-05-26 13:03:09 -07001651 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1652
Michael Chanb6016b72005-05-26 13:03:09 -07001653 bnx2_read_phy(bp, MII_PHYSID1, &val);
1654 bp->phy_id = val << 16;
1655 bnx2_read_phy(bp, MII_PHYSID2, &val);
1656 bp->phy_id |= val & 0xffff;
1657
1658 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001659 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1660 rc = bnx2_init_5706s_phy(bp);
1661 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1662 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001663 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1664 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001665 }
1666 else {
1667 rc = bnx2_init_copper_phy(bp);
1668 }
1669
1670 bnx2_setup_phy(bp);
1671
1672 return rc;
1673}
1674
1675static int
1676bnx2_set_mac_loopback(struct bnx2 *bp)
1677{
1678 u32 mac_mode;
1679
1680 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1681 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1682 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1683 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1684 bp->link_up = 1;
1685 return 0;
1686}
1687
Michael Chanbc5a0692006-01-23 16:13:22 -08001688static int bnx2_test_link(struct bnx2 *);
1689
1690static int
1691bnx2_set_phy_loopback(struct bnx2 *bp)
1692{
1693 u32 mac_mode;
1694 int rc, i;
1695
1696 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001697 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001698 BMCR_SPEED1000);
1699 spin_unlock_bh(&bp->phy_lock);
1700 if (rc)
1701 return rc;
1702
1703 for (i = 0; i < 10; i++) {
1704 if (bnx2_test_link(bp) == 0)
1705 break;
Michael Chan80be4432006-11-19 14:07:28 -08001706 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001707 }
1708
1709 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1710 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1711 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001712 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001713
1714 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1715 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1716 bp->link_up = 1;
1717 return 0;
1718}
1719
Michael Chanb6016b72005-05-26 13:03:09 -07001720static int
Michael Chanb090ae22006-01-23 16:07:10 -08001721bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001722{
1723 int i;
1724 u32 val;
1725
Michael Chanb6016b72005-05-26 13:03:09 -07001726 bp->fw_wr_seq++;
1727 msg_data |= bp->fw_wr_seq;
1728
Michael Chane3648b32005-11-04 08:51:21 -08001729 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001730
1731 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001732 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1733 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001734
Michael Chane3648b32005-11-04 08:51:21 -08001735 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001736
1737 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1738 break;
1739 }
Michael Chanb090ae22006-01-23 16:07:10 -08001740 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1741 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001742
1743 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001744 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1745 if (!silent)
1746 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1747 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001748
1749 msg_data &= ~BNX2_DRV_MSG_CODE;
1750 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1751
Michael Chane3648b32005-11-04 08:51:21 -08001752 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001753
Michael Chanb6016b72005-05-26 13:03:09 -07001754 return -EBUSY;
1755 }
1756
Michael Chanb090ae22006-01-23 16:07:10 -08001757 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1758 return -EIO;
1759
Michael Chanb6016b72005-05-26 13:03:09 -07001760 return 0;
1761}
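/*
 * Worked example (illustrative, not from the source): one bnx2_fw_sync()
 * round trip with an assumed sequence value.  If bp->fw_wr_seq advances to
 * 0x0005 and the caller passes BNX2_DRV_MSG_DATA_WAIT0 plus a reset code,
 * the driver writes (code | data | 0x0005) into BNX2_DRV_MB and then polls
 * BNX2_FW_MB until the firmware echoes 0x0005 back in the ack field.  On a
 * timeout the driver rewrites the mailbox with BNX2_DRV_MSG_CODE_FW_TIMEOUT
 * so the firmware can tell the handshake was abandoned.
 */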
1762
Michael Chan59b47d82006-11-19 14:10:45 -08001763static int
1764bnx2_init_5709_context(struct bnx2 *bp)
1765{
1766 int i, ret = 0;
1767 u32 val;
1768
1769 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1770 val |= (BCM_PAGE_BITS - 8) << 16;
1771 REG_WR(bp, BNX2_CTX_COMMAND, val);
1772 for (i = 0; i < bp->ctx_pages; i++) {
1773 int j;
1774
1775 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1776 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1777 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1778 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1779 (u64) bp->ctx_blk_mapping[i] >> 32);
1780 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1781 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1782 for (j = 0; j < 10; j++) {
1783
1784 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1785 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1786 break;
1787 udelay(5);
1788 }
1789 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1790 ret = -EBUSY;
1791 break;
1792 }
1793 }
1794 return ret;
1795}
1796
Michael Chanb6016b72005-05-26 13:03:09 -07001797static void
1798bnx2_init_context(struct bnx2 *bp)
1799{
1800 u32 vcid;
1801
1802 vcid = 96;
1803 while (vcid) {
1804 u32 vcid_addr, pcid_addr, offset;
1805
1806 vcid--;
1807
1808 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1809 u32 new_vcid;
1810
1811 vcid_addr = GET_PCID_ADDR(vcid);
1812 if (vcid & 0x8) {
1813 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1814 }
1815 else {
1816 new_vcid = vcid;
1817 }
1818 pcid_addr = GET_PCID_ADDR(new_vcid);
1819 }
1820 else {
1821 vcid_addr = GET_CID_ADDR(vcid);
1822 pcid_addr = vcid_addr;
1823 }
1824
1825 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1826 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1827
1828 /* Zero out the context. */
1829 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1830 CTX_WR(bp, 0x00, offset, 0);
1831 }
1832
1833 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1834 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1835 }
1836}
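/*
 * Worked example (illustrative): the 5706 A0 remapping above.  For
 * vcid = 0x1b, bit 3 is set, so
 *   new_vcid = 0x60 + (0x1b & 0xf0) + (0x1b & 0x7) = 0x60 + 0x10 + 0x3 = 0x73
 * and the page-table entry is taken from GET_PCID_ADDR(0x73) instead of
 * GET_PCID_ADDR(0x1b), presumably to steer around a context addressing
 * quirk in that stepping.
 */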
1837
1838static int
1839bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1840{
1841 u16 *good_mbuf;
1842 u32 good_mbuf_cnt;
1843 u32 val;
1844
1845 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1846 if (good_mbuf == NULL) {
1847 printk(KERN_ERR PFX "Failed to allocate memory in "
1848 "bnx2_alloc_bad_rbuf\n");
1849 return -ENOMEM;
1850 }
1851
1852 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1853 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1854
1855 good_mbuf_cnt = 0;
1856
1857 /* Allocate a bunch of mbufs and save the good ones in an array. */
1858 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1859 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1860 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1861
1862 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1863
1864 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1865
1866 /* The addresses with Bit 9 set are bad memory blocks. */
1867 if (!(val & (1 << 9))) {
1868 good_mbuf[good_mbuf_cnt] = (u16) val;
1869 good_mbuf_cnt++;
1870 }
1871
1872 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1873 }
1874
1875 /* Free the good ones back to the mbuf pool thus discarding
1876 * all the bad ones. */
1877 while (good_mbuf_cnt) {
1878 good_mbuf_cnt--;
1879
1880 val = good_mbuf[good_mbuf_cnt];
1881 val = (val << 9) | val | 1;
1882
1883 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1884 }
1885 kfree(good_mbuf);
1886 return 0;
1887}
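/*
 * Illustrative note, not from the source: the free command written above
 * carries the saved mbuf value in two bit fields plus bit 0 set.  For a
 * good_mbuf entry of 0x25 the value handed to BNX2_RBUF_FW_BUF_FREE is
 *   (0x25 << 9) | 0x25 | 1 = 0x4a25.
 */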
1888
1889static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001890bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001891{
1892 u32 val;
1893 u8 *mac_addr = bp->dev->dev_addr;
1894
1895 val = (mac_addr[0] << 8) | mac_addr[1];
1896
1897 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1898
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001899 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001900 (mac_addr[4] << 8) | mac_addr[5];
1901
1902 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1903}
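/*
 * Illustrative sketch, not part of the driver: how bnx2_set_mac_addr()
 * splits the 6-byte station address across the two MATCH registers.  For
 * an assumed address 00:10:18:2a:3b:4c the programmed values would be
 * MATCH0 = 0x00000010 and MATCH1 = 0x182a3b4c.  The same packing as a
 * stand-alone helper:
 */
static void mac_to_match_regs(const unsigned char *mac,
			      unsigned int *match0, unsigned int *match1)
{
	/* Top two octets in the low 16 bits of MATCH0... */
	*match0 = (mac[0] << 8) | mac[1];
	/* ...remaining four octets fill MATCH1, most significant first. */
	*match1 = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}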
1904
1905static inline int
1906bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1907{
1908 struct sk_buff *skb;
1909 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1910 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001911 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001912 unsigned long align;
1913
Michael Chan932f3772006-08-15 01:39:36 -07001914 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001915 if (skb == NULL) {
1916 return -ENOMEM;
1917 }
1918
Michael Chan59b47d82006-11-19 14:10:45 -08001919 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1920 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001921
Michael Chanb6016b72005-05-26 13:03:09 -07001922 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1923 PCI_DMA_FROMDEVICE);
1924
1925 rx_buf->skb = skb;
1926 pci_unmap_addr_set(rx_buf, mapping, mapping);
1927
1928 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1929 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1930
1931 bp->rx_prod_bseq += bp->rx_buf_use_size;
1932
1933 return 0;
1934}
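/*
 * Worked example (illustrative): the alignment fixup above, assuming
 * BNX2_RX_ALIGN is a power of two such as 16.  If netdev_alloc_skb()
 * returns data ending in ...0x06, then align = 6 and skb_reserve(16 - 6)
 * moves skb->data forward 10 bytes onto the next 16-byte boundary before
 * the buffer is DMA-mapped.
 */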
1935
1936static void
1937bnx2_phy_int(struct bnx2 *bp)
1938{
1939 u32 new_link_state, old_link_state;
1940
1941 new_link_state = bp->status_blk->status_attn_bits &
1942 STATUS_ATTN_BITS_LINK_STATE;
1943 old_link_state = bp->status_blk->status_attn_bits_ack &
1944 STATUS_ATTN_BITS_LINK_STATE;
1945 if (new_link_state != old_link_state) {
1946 if (new_link_state) {
1947 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1948 STATUS_ATTN_BITS_LINK_STATE);
1949 }
1950 else {
1951 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1952 STATUS_ATTN_BITS_LINK_STATE);
1953 }
1954 bnx2_set_link(bp);
1955 }
1956}
1957
1958static void
1959bnx2_tx_int(struct bnx2 *bp)
1960{
Michael Chanf4e418f2005-11-04 08:53:48 -08001961 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001962 u16 hw_cons, sw_cons, sw_ring_cons;
1963 int tx_free_bd = 0;
1964
Michael Chanf4e418f2005-11-04 08:53:48 -08001965 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001966 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1967 hw_cons++;
1968 }
1969 sw_cons = bp->tx_cons;
1970
1971 while (sw_cons != hw_cons) {
1972 struct sw_bd *tx_buf;
1973 struct sk_buff *skb;
1974 int i, last;
1975
1976 sw_ring_cons = TX_RING_IDX(sw_cons);
1977
1978 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1979 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01001980
Michael Chanb6016b72005-05-26 13:03:09 -07001981 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07001982 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001983 u16 last_idx, last_ring_idx;
1984
1985 last_idx = sw_cons +
1986 skb_shinfo(skb)->nr_frags + 1;
1987 last_ring_idx = sw_ring_cons +
1988 skb_shinfo(skb)->nr_frags + 1;
1989 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1990 last_idx++;
1991 }
1992 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1993 break;
1994 }
1995 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01001996
Michael Chanb6016b72005-05-26 13:03:09 -07001997 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1998 skb_headlen(skb), PCI_DMA_TODEVICE);
1999
2000 tx_buf->skb = NULL;
2001 last = skb_shinfo(skb)->nr_frags;
2002
2003 for (i = 0; i < last; i++) {
2004 sw_cons = NEXT_TX_BD(sw_cons);
2005
2006 pci_unmap_page(bp->pdev,
2007 pci_unmap_addr(
2008 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2009 mapping),
2010 skb_shinfo(skb)->frags[i].size,
2011 PCI_DMA_TODEVICE);
2012 }
2013
2014 sw_cons = NEXT_TX_BD(sw_cons);
2015
2016 tx_free_bd += last + 1;
2017
Michael Chan745720e2006-06-29 12:37:41 -07002018 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002019
Michael Chanf4e418f2005-11-04 08:53:48 -08002020 hw_cons = bp->hw_tx_cons =
2021 sblk->status_tx_quick_consumer_index0;
2022
Michael Chanb6016b72005-05-26 13:03:09 -07002023 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2024 hw_cons++;
2025 }
2026 }
2027
Michael Chane89bbf12005-08-25 15:36:58 -07002028 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002029 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2030 * before checking for netif_queue_stopped(). Without the
2031 * memory barrier, there is a small possibility that bnx2_start_xmit()
2032 * will miss it and cause the queue to be stopped forever.
2033 */
2034 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002035
Michael Chan2f8af122006-08-15 01:39:10 -07002036 if (unlikely(netif_queue_stopped(bp->dev)) &&
2037 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2038 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002039 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002040 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002041 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002042 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002043 }
Michael Chanb6016b72005-05-26 13:03:09 -07002044}
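/*
 * Illustrative sketch, not part of the driver: the "== MAX_TX_DESC_CNT"
 * tests above skip the last descriptor slot of each ring page, which is
 * reserved for chaining to the next page rather than for a real packet,
 * so a consumer index is never left pointing at it.  A stand-alone model
 * of that adjustment (max_desc_cnt standing in for MAX_TX_DESC_CNT):
 */
static unsigned short skip_chain_slot(unsigned short cons,
				      unsigned short max_desc_cnt)
{
	/* If the index lands on the page's reserved chain entry, step past it. */
	if ((cons & max_desc_cnt) == max_desc_cnt)
		cons++;
	return cons;
}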
2045
2046static inline void
2047bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2048 u16 cons, u16 prod)
2049{
Michael Chan236b6392006-03-20 17:49:02 -08002050 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2051 struct rx_bd *cons_bd, *prod_bd;
2052
2053 cons_rx_buf = &bp->rx_buf_ring[cons];
2054 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002055
2056 pci_dma_sync_single_for_device(bp->pdev,
2057 pci_unmap_addr(cons_rx_buf, mapping),
2058 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2059
Michael Chan236b6392006-03-20 17:49:02 -08002060 bp->rx_prod_bseq += bp->rx_buf_use_size;
2061
2062 prod_rx_buf->skb = skb;
2063
2064 if (cons == prod)
2065 return;
2066
Michael Chanb6016b72005-05-26 13:03:09 -07002067 pci_unmap_addr_set(prod_rx_buf, mapping,
2068 pci_unmap_addr(cons_rx_buf, mapping));
2069
Michael Chan3fdfcc22006-03-20 17:49:49 -08002070 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2071 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002072 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2073 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002074}
2075
2076static int
2077bnx2_rx_int(struct bnx2 *bp, int budget)
2078{
Michael Chanf4e418f2005-11-04 08:53:48 -08002079 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002080 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2081 struct l2_fhdr *rx_hdr;
2082 int rx_pkt = 0;
2083
Michael Chanf4e418f2005-11-04 08:53:48 -08002084 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002085 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2086 hw_cons++;
2087 }
2088 sw_cons = bp->rx_cons;
2089 sw_prod = bp->rx_prod;
2090
2091 /* Memory barrier necessary as speculative reads of the rx
2092 * buffer can be ahead of the index in the status block
2093 */
2094 rmb();
2095 while (sw_cons != hw_cons) {
2096 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002097 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002098 struct sw_bd *rx_buf;
2099 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002100 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002101
2102 sw_ring_cons = RX_RING_IDX(sw_cons);
2103 sw_ring_prod = RX_RING_IDX(sw_prod);
2104
2105 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2106 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002107
2108 rx_buf->skb = NULL;
2109
2110 dma_addr = pci_unmap_addr(rx_buf, mapping);
2111
2112 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002113 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2114
2115 rx_hdr = (struct l2_fhdr *) skb->data;
2116 len = rx_hdr->l2_fhdr_pkt_len - 4;
2117
Michael Chanade2bfe2006-01-23 16:09:51 -08002118 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002119 (L2_FHDR_ERRORS_BAD_CRC |
2120 L2_FHDR_ERRORS_PHY_DECODE |
2121 L2_FHDR_ERRORS_ALIGNMENT |
2122 L2_FHDR_ERRORS_TOO_SHORT |
2123 L2_FHDR_ERRORS_GIANT_FRAME)) {
2124
2125 goto reuse_rx;
2126 }
2127
2128 /* Since we don't have a jumbo ring, copy small packets
2129 * if mtu > 1500
2130 */
2131 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2132 struct sk_buff *new_skb;
2133
Michael Chan932f3772006-08-15 01:39:36 -07002134 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002135 if (new_skb == NULL)
2136 goto reuse_rx;
2137
2138 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002139 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2140 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002141 skb_reserve(new_skb, 2);
2142 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002143
2144 bnx2_reuse_rx_skb(bp, skb,
2145 sw_ring_cons, sw_ring_prod);
2146
2147 skb = new_skb;
2148 }
2149 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002150 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002151 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2152
2153 skb_reserve(skb, bp->rx_offset);
2154 skb_put(skb, len);
2155 }
2156 else {
2157reuse_rx:
2158 bnx2_reuse_rx_skb(bp, skb,
2159 sw_ring_cons, sw_ring_prod);
2160 goto next_rx;
2161 }
2162
2163 skb->protocol = eth_type_trans(skb, bp->dev);
2164
2165 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002166 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002167
Michael Chan745720e2006-06-29 12:37:41 -07002168 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002169 goto next_rx;
2170
2171 }
2172
Michael Chanb6016b72005-05-26 13:03:09 -07002173 skb->ip_summed = CHECKSUM_NONE;
2174 if (bp->rx_csum &&
2175 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2176 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2177
Michael Chanade2bfe2006-01-23 16:09:51 -08002178 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2179 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002180 skb->ip_summed = CHECKSUM_UNNECESSARY;
2181 }
2182
2183#ifdef BCM_VLAN
2184 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2185 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2186 rx_hdr->l2_fhdr_vlan_tag);
2187 }
2188 else
2189#endif
2190 netif_receive_skb(skb);
2191
2192 bp->dev->last_rx = jiffies;
2193 rx_pkt++;
2194
2195next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002196 sw_cons = NEXT_RX_BD(sw_cons);
2197 sw_prod = NEXT_RX_BD(sw_prod);
2198
2199 if ((rx_pkt == budget))
2200 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002201
2202 /* Refresh hw_cons to see if there is new work */
2203 if (sw_cons == hw_cons) {
2204 hw_cons = bp->hw_rx_cons =
2205 sblk->status_rx_quick_consumer_index0;
2206 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2207 hw_cons++;
2208 rmb();
2209 }
Michael Chanb6016b72005-05-26 13:03:09 -07002210 }
2211 bp->rx_cons = sw_cons;
2212 bp->rx_prod = sw_prod;
2213
2214 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2215
2216 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2217
2218 mmiowb();
2219
2220 return rx_pkt;
2221
2222}
2223
2224/* MSI ISR - The only difference between this and the INTx ISR
2225 * is that the MSI interrupt is always serviced.
2226 */
2227static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002228bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002229{
2230 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002231 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002232
Michael Chanc921e4c2005-09-08 13:15:32 -07002233 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002234 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2235 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2236 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2237
2238 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002239 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2240 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002241
Michael Chan73eef4c2005-08-25 15:39:15 -07002242 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002243
Michael Chan73eef4c2005-08-25 15:39:15 -07002244 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002245}
2246
2247static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002248bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002249{
2250 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002251 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002252
2253 /* When using INTx, it is possible for the interrupt to arrive
2254 * at the CPU before the status block posted prior to the
2255 * interrupt. Reading a register will flush the status block.
2256 * When using MSI, the MSI message will always complete after
2257 * the status block write.
2258 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002259 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002260 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2261 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002262 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002263
2264 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2265 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2266 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2267
2268 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002269 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2270 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002271
Michael Chan73eef4c2005-08-25 15:39:15 -07002272 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002273
Michael Chan73eef4c2005-08-25 15:39:15 -07002274 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002275}
2276
Michael Chanf4e418f2005-11-04 08:53:48 -08002277static inline int
2278bnx2_has_work(struct bnx2 *bp)
2279{
2280 struct status_block *sblk = bp->status_blk;
2281
2282 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2283 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2284 return 1;
2285
Michael Chandb8b2252007-03-28 14:17:36 -07002286 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2287 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
Michael Chanf4e418f2005-11-04 08:53:48 -08002288 return 1;
2289
2290 return 0;
2291}
2292
Michael Chanb6016b72005-05-26 13:03:09 -07002293static int
2294bnx2_poll(struct net_device *dev, int *budget)
2295{
Michael Chan972ec0d2006-01-23 16:12:43 -08002296 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002297
Michael Chanb6016b72005-05-26 13:03:09 -07002298 if ((bp->status_blk->status_attn_bits &
2299 STATUS_ATTN_BITS_LINK_STATE) !=
2300 (bp->status_blk->status_attn_bits_ack &
2301 STATUS_ATTN_BITS_LINK_STATE)) {
2302
Michael Chanc770a652005-08-25 15:38:39 -07002303 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002304 bnx2_phy_int(bp);
Michael Chanc770a652005-08-25 15:38:39 -07002305 spin_unlock(&bp->phy_lock);
Michael Chanbf5295b2006-03-23 01:11:56 -08002306
2307 /* This is needed to take care of transient status
2308 * during link changes.
2309 */
2310 REG_WR(bp, BNX2_HC_COMMAND,
2311 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2312 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002313 }
2314
Michael Chanf4e418f2005-11-04 08:53:48 -08002315 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002316 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002317
Michael Chanf4e418f2005-11-04 08:53:48 -08002318 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002319 int orig_budget = *budget;
2320 int work_done;
2321
2322 if (orig_budget > dev->quota)
2323 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002324
Michael Chanb6016b72005-05-26 13:03:09 -07002325 work_done = bnx2_rx_int(bp, orig_budget);
2326 *budget -= work_done;
2327 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002328 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002329
Michael Chanf4e418f2005-11-04 08:53:48 -08002330 bp->last_status_idx = bp->status_blk->status_idx;
2331 rmb();
2332
2333 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002334 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002335 if (likely(bp->flags & USING_MSI_FLAG)) {
2336 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2337 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2338 bp->last_status_idx);
2339 return 0;
2340 }
Michael Chanb6016b72005-05-26 13:03:09 -07002341 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002342 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2343 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2344 bp->last_status_idx);
2345
2346 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2347 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2348 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002349 return 0;
2350 }
2351
2352 return 1;
2353}
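/*
 * Worked example (illustrative): the old-style NAPI accounting above.  If
 * *budget arrives as 64 but dev->quota is only 32, bnx2_rx_int() is capped
 * at 32 packets and both counters are reduced by the number actually
 * completed.  Returning 1 asks the core to poll again; returning 0, after
 * netif_rx_complete() and the interrupt-ack writes, ends the polling cycle.
 */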
2354
Herbert Xu932ff272006-06-09 12:20:56 -07002355/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002356 * from set_multicast.
2357 */
2358static void
2359bnx2_set_rx_mode(struct net_device *dev)
2360{
Michael Chan972ec0d2006-01-23 16:12:43 -08002361 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002362 u32 rx_mode, sort_mode;
2363 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002364
Michael Chanc770a652005-08-25 15:38:39 -07002365 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002366
2367 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2368 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2369 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2370#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002371 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002372 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002373#else
Michael Chane29054f2006-01-23 16:06:06 -08002374 if (!(bp->flags & ASF_ENABLE_FLAG))
2375 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002376#endif
2377 if (dev->flags & IFF_PROMISC) {
2378 /* Promiscuous mode. */
2379 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002380 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2381 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002382 }
2383 else if (dev->flags & IFF_ALLMULTI) {
2384 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2385 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2386 0xffffffff);
2387 }
2388 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2389 }
2390 else {
2391 /* Accept one or more multicast(s). */
2392 struct dev_mc_list *mclist;
2393 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2394 u32 regidx;
2395 u32 bit;
2396 u32 crc;
2397
2398 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2399
2400 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2401 i++, mclist = mclist->next) {
2402
2403 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2404 bit = crc & 0xff;
2405 regidx = (bit & 0xe0) >> 5;
2406 bit &= 0x1f;
2407 mc_filter[regidx] |= (1 << bit);
2408 }
2409
2410 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2411 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2412 mc_filter[i]);
2413 }
2414
2415 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2416 }
2417
2418 if (rx_mode != bp->rx_mode) {
2419 bp->rx_mode = rx_mode;
2420 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2421 }
2422
2423 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2424 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2425 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2426
Michael Chanc770a652005-08-25 15:38:39 -07002427 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002428}
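/*
 * Worked example (illustrative): the multicast hash above.  Assuming
 * ether_crc_le() yields 0x1234abcd for some address, the low byte is 0xcd,
 * so regidx = (0xcd & 0xe0) >> 5 = 6 and bit = 0xcd & 0x1f = 13, i.e. bit
 * 13 of hash register 6 (BNX2_EMAC_MULTICAST_HASH0 + 0x18) gets set.  The
 * slot selection on its own:
 */
static void mc_hash_slot(unsigned int crc,
			 unsigned int *regidx, unsigned int *bit)
{
	unsigned int b = crc & 0xff;	/* low byte of the little-endian CRC */

	*regidx = (b & 0xe0) >> 5;	/* which of the 8 hash registers */
	*bit = b & 0x1f;		/* which of its 32 bits */
}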
2429
Michael Chanfba9fe92006-06-12 22:21:25 -07002430#define FW_BUF_SIZE 0x8000
2431
2432static int
2433bnx2_gunzip_init(struct bnx2 *bp)
2434{
2435 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2436 goto gunzip_nomem1;
2437
2438 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2439 goto gunzip_nomem2;
2440
2441 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2442 if (bp->strm->workspace == NULL)
2443 goto gunzip_nomem3;
2444
2445 return 0;
2446
2447gunzip_nomem3:
2448 kfree(bp->strm);
2449 bp->strm = NULL;
2450
2451gunzip_nomem2:
2452 vfree(bp->gunzip_buf);
2453 bp->gunzip_buf = NULL;
2454
2455gunzip_nomem1:
2456 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2457	       "decompression.\n", bp->dev->name);
2458 return -ENOMEM;
2459}
2460
2461static void
2462bnx2_gunzip_end(struct bnx2 *bp)
2463{
2464 kfree(bp->strm->workspace);
2465
2466 kfree(bp->strm);
2467 bp->strm = NULL;
2468
2469 if (bp->gunzip_buf) {
2470 vfree(bp->gunzip_buf);
2471 bp->gunzip_buf = NULL;
2472 }
2473}
2474
2475static int
2476bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2477{
2478 int n, rc;
2479
2480 /* check gzip header */
2481 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2482 return -EINVAL;
2483
2484 n = 10;
2485
2486#define FNAME 0x8
2487 if (zbuf[3] & FNAME)
2488 while ((zbuf[n++] != 0) && (n < len));
2489
2490 bp->strm->next_in = zbuf + n;
2491 bp->strm->avail_in = len - n;
2492 bp->strm->next_out = bp->gunzip_buf;
2493 bp->strm->avail_out = FW_BUF_SIZE;
2494
2495 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2496 if (rc != Z_OK)
2497 return rc;
2498
2499 rc = zlib_inflate(bp->strm, Z_FINISH);
2500
2501 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2502 *outbuf = bp->gunzip_buf;
2503
2504 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2505 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2506 bp->dev->name, bp->strm->msg);
2507
2508 zlib_inflateEnd(bp->strm);
2509
2510 if (rc == Z_STREAM_END)
2511 return 0;
2512
2513 return rc;
2514}
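/*
 * Illustrative note, not from the source: the firmware blobs are plain
 * gzip members, so the code above checks the 0x1f 0x8b magic and the
 * deflate method byte, skips the fixed 10-byte header and, when the FNAME
 * flag (0x8) is set, the NUL-terminated original file name as well, then
 * feeds the raw deflate stream to zlib with a -MAX_WBITS window so no
 * gzip/zlib wrapper is expected by the inflater.  Other optional header
 * fields (FEXTRA, FCOMMENT, FHCRC) are not parsed, so the embedded images
 * are presumably generated without them.
 */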
2515
Michael Chanb6016b72005-05-26 13:03:09 -07002516static void
2517load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2518 u32 rv2p_proc)
2519{
2520 int i;
2521 u32 val;
2522
2523
2524 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002525 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002526 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002527 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002528 rv2p_code++;
2529
2530 if (rv2p_proc == RV2P_PROC1) {
2531 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2532 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2533 }
2534 else {
2535 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2536 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2537 }
2538 }
2539
2540 /* Reset the processor, un-stall is done later. */
2541 if (rv2p_proc == RV2P_PROC1) {
2542 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2543 }
2544 else {
2545 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2546 }
2547}
2548
Michael Chanaf3ee512006-11-19 14:09:25 -08002549static int
Michael Chanb6016b72005-05-26 13:03:09 -07002550load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2551{
2552 u32 offset;
2553 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002554 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002555
2556 /* Halt the CPU. */
2557 val = REG_RD_IND(bp, cpu_reg->mode);
2558 val |= cpu_reg->mode_value_halt;
2559 REG_WR_IND(bp, cpu_reg->mode, val);
2560 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2561
2562 /* Load the Text area. */
2563 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002564 if (fw->gz_text) {
2565 u32 text_len;
2566 void *text;
2567
2568 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2569 &text_len);
2570 if (rc)
2571 return rc;
2572
2573 fw->text = text;
2574 }
2575 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002576 int j;
2577
2578 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002579 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002580 }
2581 }
2582
2583 /* Load the Data area. */
2584 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2585 if (fw->data) {
2586 int j;
2587
2588 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2589 REG_WR_IND(bp, offset, fw->data[j]);
2590 }
2591 }
2592
2593 /* Load the SBSS area. */
2594 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2595 if (fw->sbss) {
2596 int j;
2597
2598 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2599 REG_WR_IND(bp, offset, fw->sbss[j]);
2600 }
2601 }
2602
2603 /* Load the BSS area. */
2604 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2605 if (fw->bss) {
2606 int j;
2607
2608 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2609 REG_WR_IND(bp, offset, fw->bss[j]);
2610 }
2611 }
2612
2613 /* Load the Read-Only area. */
2614 offset = cpu_reg->spad_base +
2615 (fw->rodata_addr - cpu_reg->mips_view_base);
2616 if (fw->rodata) {
2617 int j;
2618
2619 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2620 REG_WR_IND(bp, offset, fw->rodata[j]);
2621 }
2622 }
2623
2624 /* Clear the pre-fetch instruction. */
2625 REG_WR_IND(bp, cpu_reg->inst, 0);
2626 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2627
2628 /* Start the CPU. */
2629 val = REG_RD_IND(bp, cpu_reg->mode);
2630 val &= ~cpu_reg->mode_value_halt;
2631 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2632 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002633
2634 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002635}
2636
Michael Chanfba9fe92006-06-12 22:21:25 -07002637static int
Michael Chanb6016b72005-05-26 13:03:09 -07002638bnx2_init_cpus(struct bnx2 *bp)
2639{
2640 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002641 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002642 int rc = 0;
2643 void *text;
2644 u32 text_len;
2645
2646 if ((rc = bnx2_gunzip_init(bp)) != 0)
2647 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002648
2649 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002650 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2651 &text_len);
2652 if (rc)
2653 goto init_cpu_err;
2654
2655 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2656
2657 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2658 &text_len);
2659 if (rc)
2660 goto init_cpu_err;
2661
2662 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002663
2664 /* Initialize the RX Processor. */
2665 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2666 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2667 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2668 cpu_reg.state = BNX2_RXP_CPU_STATE;
2669 cpu_reg.state_value_clear = 0xffffff;
2670 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2671 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2672 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2673 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2674 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2675 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2676 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002677
Michael Chand43584c2006-11-19 14:14:35 -08002678 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2679 fw = &bnx2_rxp_fw_09;
2680 else
2681 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002682
Michael Chanaf3ee512006-11-19 14:09:25 -08002683 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002684 if (rc)
2685 goto init_cpu_err;
2686
Michael Chanb6016b72005-05-26 13:03:09 -07002687 /* Initialize the TX Processor. */
2688 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2689 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2690 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2691 cpu_reg.state = BNX2_TXP_CPU_STATE;
2692 cpu_reg.state_value_clear = 0xffffff;
2693 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2694 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2695 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2696 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2697 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2698 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2699 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002700
Michael Chand43584c2006-11-19 14:14:35 -08002701 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2702 fw = &bnx2_txp_fw_09;
2703 else
2704 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002705
Michael Chanaf3ee512006-11-19 14:09:25 -08002706 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002707 if (rc)
2708 goto init_cpu_err;
2709
Michael Chanb6016b72005-05-26 13:03:09 -07002710 /* Initialize the TX Patch-up Processor. */
2711 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2712 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2713 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2714 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2715 cpu_reg.state_value_clear = 0xffffff;
2716 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2717 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2718 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2719 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2720 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2721 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2722 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002723
Michael Chand43584c2006-11-19 14:14:35 -08002724 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2725 fw = &bnx2_tpat_fw_09;
2726 else
2727 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002728
Michael Chanaf3ee512006-11-19 14:09:25 -08002729 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002730 if (rc)
2731 goto init_cpu_err;
2732
Michael Chanb6016b72005-05-26 13:03:09 -07002733 /* Initialize the Completion Processor. */
2734 cpu_reg.mode = BNX2_COM_CPU_MODE;
2735 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2736 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2737 cpu_reg.state = BNX2_COM_CPU_STATE;
2738 cpu_reg.state_value_clear = 0xffffff;
2739 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2740 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2741 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2742 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2743 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2744 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2745 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002746
Michael Chand43584c2006-11-19 14:14:35 -08002747 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2748 fw = &bnx2_com_fw_09;
2749 else
2750 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002751
Michael Chanaf3ee512006-11-19 14:09:25 -08002752 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002753 if (rc)
2754 goto init_cpu_err;
2755
Michael Chand43584c2006-11-19 14:14:35 -08002756 /* Initialize the Command Processor. */
2757 cpu_reg.mode = BNX2_CP_CPU_MODE;
2758 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2759 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2760 cpu_reg.state = BNX2_CP_CPU_STATE;
2761 cpu_reg.state_value_clear = 0xffffff;
2762 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2763 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2764 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2765 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2766 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2767 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2768 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002769
Michael Chand43584c2006-11-19 14:14:35 -08002770 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2771 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002772
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002773 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002774 if (rc)
2775 goto init_cpu_err;
2776 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002777init_cpu_err:
2778 bnx2_gunzip_end(bp);
2779 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002780}
2781
2782static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002783bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002784{
2785 u16 pmcsr;
2786
2787 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2788
2789 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002790 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002791 u32 val;
2792
2793 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2794 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2795 PCI_PM_CTRL_PME_STATUS);
2796
2797 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2798 /* delay required during transition out of D3hot */
2799 msleep(20);
2800
2801 val = REG_RD(bp, BNX2_EMAC_MODE);
2802 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2803 val &= ~BNX2_EMAC_MODE_MPKT;
2804 REG_WR(bp, BNX2_EMAC_MODE, val);
2805
2806 val = REG_RD(bp, BNX2_RPM_CONFIG);
2807 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2808 REG_WR(bp, BNX2_RPM_CONFIG, val);
2809 break;
2810 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002811 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002812 int i;
2813 u32 val, wol_msg;
2814
2815 if (bp->wol) {
2816 u32 advertising;
2817 u8 autoneg;
2818
2819 autoneg = bp->autoneg;
2820 advertising = bp->advertising;
2821
2822 bp->autoneg = AUTONEG_SPEED;
2823 bp->advertising = ADVERTISED_10baseT_Half |
2824 ADVERTISED_10baseT_Full |
2825 ADVERTISED_100baseT_Half |
2826 ADVERTISED_100baseT_Full |
2827 ADVERTISED_Autoneg;
2828
2829 bnx2_setup_copper_phy(bp);
2830
2831 bp->autoneg = autoneg;
2832 bp->advertising = advertising;
2833
2834 bnx2_set_mac_addr(bp);
2835
2836 val = REG_RD(bp, BNX2_EMAC_MODE);
2837
2838 /* Enable port mode. */
2839 val &= ~BNX2_EMAC_MODE_PORT;
2840 val |= BNX2_EMAC_MODE_PORT_MII |
2841 BNX2_EMAC_MODE_MPKT_RCVD |
2842 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002843 BNX2_EMAC_MODE_MPKT;
2844
2845 REG_WR(bp, BNX2_EMAC_MODE, val);
2846
2847 /* receive all multicast */
2848 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2849 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2850 0xffffffff);
2851 }
2852 REG_WR(bp, BNX2_EMAC_RX_MODE,
2853 BNX2_EMAC_RX_MODE_SORT_MODE);
2854
2855 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2856 BNX2_RPM_SORT_USER0_MC_EN;
2857 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2858 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2859 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2860 BNX2_RPM_SORT_USER0_ENA);
2861
2862 /* Need to enable EMAC and RPM for WOL. */
2863 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2864 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2865 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2866 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2867
2868 val = REG_RD(bp, BNX2_RPM_CONFIG);
2869 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2870 REG_WR(bp, BNX2_RPM_CONFIG, val);
2871
2872 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2873 }
2874 else {
2875 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2876 }
2877
Michael Chandda1e392006-01-23 16:08:14 -08002878 if (!(bp->flags & NO_WOL_FLAG))
2879 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002880
2881 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2882 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2883 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2884
2885 if (bp->wol)
2886 pmcsr |= 3;
2887 }
2888 else {
2889 pmcsr |= 3;
2890 }
2891 if (bp->wol) {
2892 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2893 }
2894 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2895 pmcsr);
2896
2897 /* No more memory access after this point until
2898 * device is brought back to D0.
2899 */
2900 udelay(50);
2901 break;
2902 }
2903 default:
2904 return -EINVAL;
2905 }
2906 return 0;
2907}
2908
2909static int
2910bnx2_acquire_nvram_lock(struct bnx2 *bp)
2911{
2912 u32 val;
2913 int j;
2914
2915 /* Request access to the flash interface. */
2916 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2917 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2918 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2919 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2920 break;
2921
2922 udelay(5);
2923 }
2924
2925 if (j >= NVRAM_TIMEOUT_COUNT)
2926 return -EBUSY;
2927
2928 return 0;
2929}
2930
2931static int
2932bnx2_release_nvram_lock(struct bnx2 *bp)
2933{
2934 int j;
2935 u32 val;
2936
2937 /* Relinquish nvram interface. */
2938 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2939
2940 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2941 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2942 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2943 break;
2944
2945 udelay(5);
2946 }
2947
2948 if (j >= NVRAM_TIMEOUT_COUNT)
2949 return -EBUSY;
2950
2951 return 0;
2952}
2953
2954
2955static int
2956bnx2_enable_nvram_write(struct bnx2 *bp)
2957{
2958 u32 val;
2959
2960 val = REG_RD(bp, BNX2_MISC_CFG);
2961 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2962
2963 if (!bp->flash_info->buffered) {
2964 int j;
2965
2966 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2967 REG_WR(bp, BNX2_NVM_COMMAND,
2968 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2969
2970 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2971 udelay(5);
2972
2973 val = REG_RD(bp, BNX2_NVM_COMMAND);
2974 if (val & BNX2_NVM_COMMAND_DONE)
2975 break;
2976 }
2977
2978 if (j >= NVRAM_TIMEOUT_COUNT)
2979 return -EBUSY;
2980 }
2981 return 0;
2982}
2983
2984static void
2985bnx2_disable_nvram_write(struct bnx2 *bp)
2986{
2987 u32 val;
2988
2989 val = REG_RD(bp, BNX2_MISC_CFG);
2990 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2991}
2992
2993
2994static void
2995bnx2_enable_nvram_access(struct bnx2 *bp)
2996{
2997 u32 val;
2998
2999 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3000 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003001 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003002 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3003}
3004
3005static void
3006bnx2_disable_nvram_access(struct bnx2 *bp)
3007{
3008 u32 val;
3009
3010 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3011 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003012 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003013 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3014 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3015}
3016
3017static int
3018bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3019{
3020 u32 cmd;
3021 int j;
3022
3023 if (bp->flash_info->buffered)
3024 /* Buffered flash, no erase needed */
3025 return 0;
3026
3027 /* Build an erase command */
3028 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3029 BNX2_NVM_COMMAND_DOIT;
3030
3031 /* Need to clear DONE bit separately. */
3032 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3033
3034	/* Address of the NVRAM page to erase. */
3035 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3036
3037 /* Issue an erase command. */
3038 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3039
3040 /* Wait for completion. */
3041 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3042 u32 val;
3043
3044 udelay(5);
3045
3046 val = REG_RD(bp, BNX2_NVM_COMMAND);
3047 if (val & BNX2_NVM_COMMAND_DONE)
3048 break;
3049 }
3050
3051 if (j >= NVRAM_TIMEOUT_COUNT)
3052 return -EBUSY;
3053
3054 return 0;
3055}
3056
3057static int
3058bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3059{
3060 u32 cmd;
3061 int j;
3062
3063 /* Build the command word. */
3064 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3065
3066	/* Calculate the offset within a buffered flash. */
3067 if (bp->flash_info->buffered) {
3068 offset = ((offset / bp->flash_info->page_size) <<
3069 bp->flash_info->page_bits) +
3070 (offset % bp->flash_info->page_size);
3071 }
3072
3073 /* Need to clear DONE bit separately. */
3074 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3075
3076 /* Address of the NVRAM to read from. */
3077 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3078
3079 /* Issue a read command. */
3080 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3081
3082 /* Wait for completion. */
3083 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3084 u32 val;
3085
3086 udelay(5);
3087
3088 val = REG_RD(bp, BNX2_NVM_COMMAND);
3089 if (val & BNX2_NVM_COMMAND_DONE) {
3090 val = REG_RD(bp, BNX2_NVM_READ);
3091
3092 val = be32_to_cpu(val);
3093 memcpy(ret_val, &val, 4);
3094 break;
3095 }
3096 }
3097 if (j >= NVRAM_TIMEOUT_COUNT)
3098 return -EBUSY;
3099
3100 return 0;
3101}
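/*
 * Worked example (illustrative): the buffered-flash address translation
 * above.  Assuming a part with a 264-byte page mapped on 512-byte
 * boundaries (page_size = 264, page_bits = 9), a linear offset of 1000
 * falls in page 3 at byte 208, so the command address becomes
 * (3 << 9) + 208 = 1744.  As a stand-alone helper:
 */
static unsigned int buffered_flash_addr(unsigned int offset,
					unsigned int page_size,
					unsigned int page_bits)
{
	/* Page index in the high bits, byte-within-page in the low bits. */
	return ((offset / page_size) << page_bits) + (offset % page_size);
}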
3102
3103
3104static int
3105bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3106{
3107 u32 cmd, val32;
3108 int j;
3109
3110 /* Build the command word. */
3111 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3112
3113 /* Calculate an offset of a buffered flash. */
3114	/* Calculate the offset within a buffered flash. */
3115 offset = ((offset / bp->flash_info->page_size) <<
3116 bp->flash_info->page_bits) +
3117 (offset % bp->flash_info->page_size);
3118 }
3119
3120 /* Need to clear DONE bit separately. */
3121 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3122
3123 memcpy(&val32, val, 4);
3124 val32 = cpu_to_be32(val32);
3125
3126 /* Write the data. */
3127 REG_WR(bp, BNX2_NVM_WRITE, val32);
3128
3129 /* Address of the NVRAM to write to. */
3130 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3131
3132 /* Issue the write command. */
3133 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3134
3135 /* Wait for completion. */
3136 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3137 udelay(5);
3138
3139 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3140 break;
3141 }
3142 if (j >= NVRAM_TIMEOUT_COUNT)
3143 return -EBUSY;
3144
3145 return 0;
3146}
3147
3148static int
3149bnx2_init_nvram(struct bnx2 *bp)
3150{
3151 u32 val;
3152 int j, entry_count, rc;
3153 struct flash_spec *flash;
3154
3155 /* Determine the selected interface. */
3156 val = REG_RD(bp, BNX2_NVM_CFG1);
3157
3158 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3159
3160 rc = 0;
3161 if (val & 0x40000000) {
3162
3163 /* Flash interface has been reconfigured */
3164 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003165 j++, flash++) {
3166 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3167 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003168 bp->flash_info = flash;
3169 break;
3170 }
3171 }
3172 }
3173 else {
Michael Chan37137702005-11-04 08:49:17 -08003174 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003175		/* Not yet reconfigured */
3176
Michael Chan37137702005-11-04 08:49:17 -08003177 if (val & (1 << 23))
3178 mask = FLASH_BACKUP_STRAP_MASK;
3179 else
3180 mask = FLASH_STRAP_MASK;
3181
Michael Chanb6016b72005-05-26 13:03:09 -07003182 for (j = 0, flash = &flash_table[0]; j < entry_count;
3183 j++, flash++) {
3184
Michael Chan37137702005-11-04 08:49:17 -08003185 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003186 bp->flash_info = flash;
3187
3188 /* Request access to the flash interface. */
3189 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3190 return rc;
3191
3192 /* Enable access to flash interface */
3193 bnx2_enable_nvram_access(bp);
3194
3195 /* Reconfigure the flash interface */
3196 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3197 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3198 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3199 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3200
3201 /* Disable access to flash interface */
3202 bnx2_disable_nvram_access(bp);
3203 bnx2_release_nvram_lock(bp);
3204
3205 break;
3206 }
3207 }
3208 } /* if (val & 0x40000000) */
3209
3210 if (j == entry_count) {
3211 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003212 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003213 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003214 }
3215
Michael Chan1122db72006-01-23 16:11:42 -08003216 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3217 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3218 if (val)
3219 bp->flash_size = val;
3220 else
3221 bp->flash_size = bp->flash_info->total_size;
3222
Michael Chanb6016b72005-05-26 13:03:09 -07003223 return rc;
3224}
3225
3226static int
3227bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3228 int buf_size)
3229{
3230 int rc = 0;
3231 u32 cmd_flags, offset32, len32, extra;
3232
3233 if (buf_size == 0)
3234 return 0;
3235
3236 /* Request access to the flash interface. */
3237 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3238 return rc;
3239
3240 /* Enable access to flash interface */
3241 bnx2_enable_nvram_access(bp);
3242
3243 len32 = buf_size;
3244 offset32 = offset;
3245 extra = 0;
3246
3247 cmd_flags = 0;
3248
3249 if (offset32 & 3) {
3250 u8 buf[4];
3251 u32 pre_len;
3252
3253 offset32 &= ~3;
3254 pre_len = 4 - (offset & 3);
3255
3256 if (pre_len >= len32) {
3257 pre_len = len32;
3258 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3259 BNX2_NVM_COMMAND_LAST;
3260 }
3261 else {
3262 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3263 }
3264
3265 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3266
3267 if (rc)
3268 return rc;
3269
3270 memcpy(ret_buf, buf + (offset & 3), pre_len);
3271
3272 offset32 += 4;
3273 ret_buf += pre_len;
3274 len32 -= pre_len;
3275 }
3276 if (len32 & 3) {
3277 extra = 4 - (len32 & 3);
3278 len32 = (len32 + 4) & ~3;
3279 }
3280
3281 if (len32 == 4) {
3282 u8 buf[4];
3283
3284 if (cmd_flags)
3285 cmd_flags = BNX2_NVM_COMMAND_LAST;
3286 else
3287 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3288 BNX2_NVM_COMMAND_LAST;
3289
3290 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3291
3292 memcpy(ret_buf, buf, 4 - extra);
3293 }
3294 else if (len32 > 0) {
3295 u8 buf[4];
3296
3297 /* Read the first word. */
3298 if (cmd_flags)
3299 cmd_flags = 0;
3300 else
3301 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3302
3303 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3304
3305 /* Advance to the next dword. */
3306 offset32 += 4;
3307 ret_buf += 4;
3308 len32 -= 4;
3309
3310 while (len32 > 4 && rc == 0) {
3311 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3312
3313 /* Advance to the next dword. */
3314 offset32 += 4;
3315 ret_buf += 4;
3316 len32 -= 4;
3317 }
3318
3319 if (rc)
3320 return rc;
3321
3322 cmd_flags = BNX2_NVM_COMMAND_LAST;
3323 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3324
3325 memcpy(ret_buf, buf, 4 - extra);
3326 }
3327
3328 /* Disable access to flash interface */
3329 bnx2_disable_nvram_access(bp);
3330
3331 bnx2_release_nvram_lock(bp);
3332
3333 return rc;
3334}
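/*
 * Worked example (illustrative): an unaligned read through the code above
 * with offset = 5 and buf_size = 10.  The leading fixup reads the dword at
 * 4 and copies its last 3 bytes (offsets 5-7), the aligned middle read
 * fetches the dword at 8 in full (offsets 8-11), and the trailing fixup
 * reads the dword at 12 but copies only 3 of its 4 bytes (offsets 12-14),
 * for the requested 10 bytes in total.
 */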
3335
3336static int
3337bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3338 int buf_size)
3339{
3340 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003341 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003342 int rc = 0;
3343 int align_start, align_end;
3344
3345 buf = data_buf;
3346 offset32 = offset;
3347 len32 = buf_size;
3348 align_start = align_end = 0;
3349
3350 if ((align_start = (offset32 & 3))) {
3351 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003352 len32 += align_start;
3353 if (len32 < 4)
3354 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003355 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3356 return rc;
3357 }
3358
3359 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003360 align_end = 4 - (len32 & 3);
3361 len32 += align_end;
3362 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3363 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003364 }
3365
3366 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003367 align_buf = kmalloc(len32, GFP_KERNEL);
3368 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003369 return -ENOMEM;
3370 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003371 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003372 }
3373 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003374 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003375 }
Michael Chane6be7632007-01-08 19:56:13 -08003376 memcpy(align_buf + align_start, data_buf, buf_size);
3377 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003378 }
3379
Michael Chanae181bc2006-05-22 16:39:20 -07003380 if (bp->flash_info->buffered == 0) {
3381 flash_buffer = kmalloc(264, GFP_KERNEL);
3382 if (flash_buffer == NULL) {
3383 rc = -ENOMEM;
3384 goto nvram_write_end;
3385 }
3386 }
3387
Michael Chanb6016b72005-05-26 13:03:09 -07003388 written = 0;
3389 while ((written < len32) && (rc == 0)) {
3390 u32 page_start, page_end, data_start, data_end;
3391 u32 addr, cmd_flags;
3392 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003393
3394 /* Find the page_start addr */
3395 page_start = offset32 + written;
3396 page_start -= (page_start % bp->flash_info->page_size);
3397 /* Find the page_end addr */
3398 page_end = page_start + bp->flash_info->page_size;
3399 /* Find the data_start addr */
3400 data_start = (written == 0) ? offset32 : page_start;
3401 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003402 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003403 (offset32 + len32) : page_end;
3404
3405 /* Request access to the flash interface. */
3406 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3407 goto nvram_write_end;
3408
3409 /* Enable access to flash interface */
3410 bnx2_enable_nvram_access(bp);
3411
3412 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3413 if (bp->flash_info->buffered == 0) {
3414 int j;
3415
3416 /* Read the whole page into the buffer
3417			 * (non-buffered flash only) */
3418 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3419 if (j == (bp->flash_info->page_size - 4)) {
3420 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3421 }
3422 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003423 page_start + j,
3424 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003425 cmd_flags);
3426
3427 if (rc)
3428 goto nvram_write_end;
3429
3430 cmd_flags = 0;
3431 }
3432 }
3433
3434 /* Enable writes to flash interface (unlock write-protect) */
3435 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3436 goto nvram_write_end;
3437
Michael Chanb6016b72005-05-26 13:03:09 -07003438 /* Loop to write back the buffer data from page_start to
3439 * data_start */
3440 i = 0;
3441 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003442 /* Erase the page */
3443 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3444 goto nvram_write_end;
3445
3446 /* Re-enable the write again for the actual write */
3447 bnx2_enable_nvram_write(bp);
3448
Michael Chanb6016b72005-05-26 13:03:09 -07003449 for (addr = page_start; addr < data_start;
3450 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003451
Michael Chanb6016b72005-05-26 13:03:09 -07003452 rc = bnx2_nvram_write_dword(bp, addr,
3453 &flash_buffer[i], cmd_flags);
3454
3455 if (rc != 0)
3456 goto nvram_write_end;
3457
3458 cmd_flags = 0;
3459 }
3460 }
3461
3462 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003463 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003464 if ((addr == page_end - 4) ||
3465 ((bp->flash_info->buffered) &&
3466 (addr == data_end - 4))) {
3467
3468 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3469 }
3470 rc = bnx2_nvram_write_dword(bp, addr, buf,
3471 cmd_flags);
3472
3473 if (rc != 0)
3474 goto nvram_write_end;
3475
3476 cmd_flags = 0;
3477 buf += 4;
3478 }
3479
3480 /* Loop to write back the buffer data from data_end
3481 * to page_end */
3482 if (bp->flash_info->buffered == 0) {
3483 for (addr = data_end; addr < page_end;
3484 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003485
Michael Chanb6016b72005-05-26 13:03:09 -07003486 if (addr == page_end-4) {
3487 cmd_flags = BNX2_NVM_COMMAND_LAST;
3488 }
3489 rc = bnx2_nvram_write_dword(bp, addr,
3490 &flash_buffer[i], cmd_flags);
3491
3492 if (rc != 0)
3493 goto nvram_write_end;
3494
3495 cmd_flags = 0;
3496 }
3497 }
3498
3499 /* Disable writes to flash interface (lock write-protect) */
3500 bnx2_disable_nvram_write(bp);
3501
3502 /* Disable access to flash interface */
3503 bnx2_disable_nvram_access(bp);
3504 bnx2_release_nvram_lock(bp);
3505
3506 /* Increment written */
3507 written += data_end - data_start;
3508 }
3509
3510nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003511 kfree(flash_buffer);
3512 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003513 return rc;
3514}
3515
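/* Reset sequence: quiesce DMA and host coalescing, handshake with the
 * firmware (WAIT0), post the driver reset signature, issue the core reset
 * (via BNX2_MISC_COMMAND on the 5709, BNX2_PCICFG_MISC_CONFIG on older
 * chips), poll for completion, verify endian configuration, and finally wait
 * for the firmware to finish its own initialization (WAIT1).
 */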
3516static int
3517bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3518{
3519 u32 val;
3520 int i, rc = 0;
3521
3522 /* Wait for the current PCI transaction to complete before
3523 * issuing a reset. */
3524 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3525 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3526 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3527 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3528 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3529 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3530 udelay(5);
3531
Michael Chanb090ae22006-01-23 16:07:10 -08003532 /* Wait for the firmware to tell us it is ok to issue a reset. */
3533 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3534
Michael Chanb6016b72005-05-26 13:03:09 -07003535 /* Deposit a driver reset signature so the firmware knows that
3536 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003537 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003538 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3539
Michael Chanb6016b72005-05-26 13:03:09 -07003540	/* Do a dummy read to force the chip to complete all current transactions
3541 * before we issue a reset. */
3542 val = REG_RD(bp, BNX2_MISC_ID);
3543
Michael Chan234754d2006-11-19 14:11:41 -08003544 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3545 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3546 REG_RD(bp, BNX2_MISC_COMMAND);
3547 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003548
Michael Chan234754d2006-11-19 14:11:41 -08003549 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3550 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003551
Michael Chan234754d2006-11-19 14:11:41 -08003552 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003553
Michael Chan234754d2006-11-19 14:11:41 -08003554 } else {
3555 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3556 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3557 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3558
3559 /* Chip reset. */
3560 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3561
3562 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3563 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3564 current->state = TASK_UNINTERRUPTIBLE;
3565 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003566 }
Michael Chanb6016b72005-05-26 13:03:09 -07003567
Michael Chan234754d2006-11-19 14:11:41 -08003568		/* Reset takes approximately 30 usec */
3569 for (i = 0; i < 10; i++) {
3570 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3571 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3572 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3573 break;
3574 udelay(10);
3575 }
3576
3577 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3578 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3579 printk(KERN_ERR PFX "Chip reset did not complete\n");
3580 return -EBUSY;
3581 }
Michael Chanb6016b72005-05-26 13:03:09 -07003582 }
3583
3584 /* Make sure byte swapping is properly configured. */
3585 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3586 if (val != 0x01020304) {
3587 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3588 return -ENODEV;
3589 }
3590
Michael Chanb6016b72005-05-26 13:03:09 -07003591 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003592 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3593 if (rc)
3594 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003595
3596 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3597		/* Adjust the voltage regulator two steps lower. The default
3598 * of this register is 0x0000000e. */
3599 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3600
3601 /* Remove bad rbuf memory from the free pool. */
3602 rc = bnx2_alloc_bad_rbuf(bp);
3603 }
3604
3605 return rc;
3606}
3607
3608static int
3609bnx2_init_chip(struct bnx2 *bp)
3610{
3611 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003612 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003613
3614 /* Make sure the interrupt is not active. */
3615 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3616
3617 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3618 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3619#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003620 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003621#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003622 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003623 DMA_READ_CHANS << 12 |
3624 DMA_WRITE_CHANS << 16;
3625
3626 val |= (0x2 << 20) | (1 << 11);
3627
Michael Chandda1e392006-01-23 16:08:14 -08003628 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003629 val |= (1 << 23);
3630
3631 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3632 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3633 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3634
3635 REG_WR(bp, BNX2_DMA_CONFIG, val);
3636
3637 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3638 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3639 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3640 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3641 }
3642
3643 if (bp->flags & PCIX_FLAG) {
3644 u16 val16;
3645
3646 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3647 &val16);
3648 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3649 val16 & ~PCI_X_CMD_ERO);
3650 }
3651
3652 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3653 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3654 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3655 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3656
3657 /* Initialize context mapping and zero out the quick contexts. The
3658 * context block must have already been enabled. */
Michael Chan59b47d82006-11-19 14:10:45 -08003659 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3660 bnx2_init_5709_context(bp);
3661 else
3662 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003663
Michael Chanfba9fe92006-06-12 22:21:25 -07003664 if ((rc = bnx2_init_cpus(bp)) != 0)
3665 return rc;
3666
Michael Chanb6016b72005-05-26 13:03:09 -07003667 bnx2_init_nvram(bp);
3668
3669 bnx2_set_mac_addr(bp);
3670
3671 val = REG_RD(bp, BNX2_MQ_CONFIG);
3672 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3673 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003674 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3675 val |= BNX2_MQ_CONFIG_HALT_DIS;
3676
Michael Chanb6016b72005-05-26 13:03:09 -07003677 REG_WR(bp, BNX2_MQ_CONFIG, val);
3678
3679 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3680 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3681 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3682
3683 val = (BCM_PAGE_BITS - 8) << 24;
3684 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3685
3686 /* Configure page size. */
3687 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3688 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3689 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3690 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3691
3692 val = bp->mac_addr[0] +
3693 (bp->mac_addr[1] << 8) +
3694 (bp->mac_addr[2] << 16) +
3695 bp->mac_addr[3] +
3696 (bp->mac_addr[4] << 8) +
3697 (bp->mac_addr[5] << 16);
3698 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3699
3700 /* Program the MTU. Also include 4 bytes for CRC32. */
3701 val = bp->dev->mtu + ETH_HLEN + 4;
3702 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3703 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3704 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3705
3706 bp->last_status_idx = 0;
3707 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3708
3709 /* Set up how to generate a link change interrupt. */
3710 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3711
3712 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3713 (u64) bp->status_blk_mapping & 0xffffffff);
3714 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3715
3716 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3717 (u64) bp->stats_blk_mapping & 0xffffffff);
3718 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3719 (u64) bp->stats_blk_mapping >> 32);
3720
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003721 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003722 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3723
3724 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3725 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3726
3727 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3728 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3729
3730 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3731
3732 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3733
3734 REG_WR(bp, BNX2_HC_COM_TICKS,
3735 (bp->com_ticks_int << 16) | bp->com_ticks);
3736
3737 REG_WR(bp, BNX2_HC_CMD_TICKS,
3738 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3739
3740 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3741 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3742
3743 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3744 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3745 else {
3746 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3747 BNX2_HC_CONFIG_TX_TMR_MODE |
3748 BNX2_HC_CONFIG_COLLECT_STATS);
3749 }
3750
3751 /* Clear internal stats counters. */
3752 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3753
3754 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3755
Michael Chane29054f2006-01-23 16:06:06 -08003756 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3757 BNX2_PORT_FEATURE_ASF_ENABLED)
3758 bp->flags |= ASF_ENABLE_FLAG;
3759
Michael Chanb6016b72005-05-26 13:03:09 -07003760 /* Initialize the receive filter. */
3761 bnx2_set_rx_mode(bp->dev);
3762
Michael Chanb090ae22006-01-23 16:07:10 -08003763 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3764 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003765
3766 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3767 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3768
3769 udelay(20);
3770
Michael Chanbf5295b2006-03-23 01:11:56 -08003771 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3772
Michael Chanb090ae22006-01-23 16:07:10 -08003773 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003774}
3775
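/* The 5709 uses a different set of TX L2 context offsets (the *_XI
 * variants); the helper below selects the right offsets and programs the
 * context type, command type, and TX descriptor base address for the cid.
 */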
Michael Chan59b47d82006-11-19 14:10:45 -08003776static void
3777bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3778{
3779 u32 val, offset0, offset1, offset2, offset3;
3780
3781 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3782 offset0 = BNX2_L2CTX_TYPE_XI;
3783 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3784 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3785 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3786 } else {
3787 offset0 = BNX2_L2CTX_TYPE;
3788 offset1 = BNX2_L2CTX_CMD_TYPE;
3789 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3790 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3791 }
3792 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3793 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3794
3795 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3796 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3797
3798 val = (u64) bp->tx_desc_mapping >> 32;
3799 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3800
3801 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3802 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3803}
Michael Chanb6016b72005-05-26 13:03:09 -07003804
3805static void
3806bnx2_init_tx_ring(struct bnx2 *bp)
3807{
3808 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003809 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003810
Michael Chan2f8af122006-08-15 01:39:10 -07003811 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3812
Michael Chanb6016b72005-05-26 13:03:09 -07003813 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003814
Michael Chanb6016b72005-05-26 13:03:09 -07003815 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3816 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3817
3818 bp->tx_prod = 0;
3819 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003820 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003821 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003822
Michael Chan59b47d82006-11-19 14:10:45 -08003823 cid = TX_CID;
3824 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3825 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003826
Michael Chan59b47d82006-11-19 14:10:45 -08003827 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003828}
3829
3830static void
3831bnx2_init_rx_ring(struct bnx2 *bp)
3832{
3833 struct rx_bd *rxbd;
3834 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003835 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003836 u32 val;
3837
3838 /* 8 for CRC and VLAN */
3839 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003840 /* hw alignment */
3841 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003842
3843 ring_prod = prod = bp->rx_prod = 0;
3844 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003845 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003846 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003847
Michael Chan13daffa2006-03-20 17:49:20 -08003848 for (i = 0; i < bp->rx_max_ring; i++) {
3849 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003850
Michael Chan13daffa2006-03-20 17:49:20 -08003851 rxbd = &bp->rx_desc_ring[i][0];
3852 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3853 rxbd->rx_bd_len = bp->rx_buf_use_size;
3854 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3855 }
3856 if (i == (bp->rx_max_ring - 1))
3857 j = 0;
3858 else
3859 j = i + 1;
3860 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3861 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3862 0xffffffff;
3863 }
Michael Chanb6016b72005-05-26 13:03:09 -07003864
3865 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3866 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3867 val |= 0x02 << 8;
3868 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3869
Michael Chan13daffa2006-03-20 17:49:20 -08003870 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003871 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3872
Michael Chan13daffa2006-03-20 17:49:20 -08003873 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003874 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3875
Michael Chan236b6392006-03-20 17:49:02 -08003876 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003877 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3878 break;
3879 }
3880 prod = NEXT_RX_BD(prod);
3881 ring_prod = RX_RING_IDX(prod);
3882 }
3883 bp->rx_prod = prod;
3884
3885 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3886
3887 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3888}
3889
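/* Convert the requested number of RX buffers into a count of descriptor
 * rings and round that up to the next power of two, e.g. a request spanning
 * three rings is rounded up to four.
 */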
3890static void
Michael Chan13daffa2006-03-20 17:49:20 -08003891bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3892{
3893 u32 num_rings, max;
3894
3895 bp->rx_ring_size = size;
3896 num_rings = 1;
3897 while (size > MAX_RX_DESC_CNT) {
3898 size -= MAX_RX_DESC_CNT;
3899 num_rings++;
3900 }
3901 /* round to next power of 2 */
3902 max = MAX_RX_RINGS;
3903 while ((max & num_rings) == 0)
3904 max >>= 1;
3905
3906 if (num_rings != max)
3907 max <<= 1;
3908
3909 bp->rx_max_ring = max;
3910 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3911}
3912
3913static void
Michael Chanb6016b72005-05-26 13:03:09 -07003914bnx2_free_tx_skbs(struct bnx2 *bp)
3915{
3916 int i;
3917
3918 if (bp->tx_buf_ring == NULL)
3919 return;
3920
3921 for (i = 0; i < TX_DESC_CNT; ) {
3922 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3923 struct sk_buff *skb = tx_buf->skb;
3924 int j, last;
3925
3926 if (skb == NULL) {
3927 i++;
3928 continue;
3929 }
3930
3931 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3932 skb_headlen(skb), PCI_DMA_TODEVICE);
3933
3934 tx_buf->skb = NULL;
3935
3936 last = skb_shinfo(skb)->nr_frags;
3937 for (j = 0; j < last; j++) {
3938 tx_buf = &bp->tx_buf_ring[i + j + 1];
3939 pci_unmap_page(bp->pdev,
3940 pci_unmap_addr(tx_buf, mapping),
3941 skb_shinfo(skb)->frags[j].size,
3942 PCI_DMA_TODEVICE);
3943 }
Michael Chan745720e2006-06-29 12:37:41 -07003944 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003945 i += j + 1;
3946 }
3947
3948}
3949
3950static void
3951bnx2_free_rx_skbs(struct bnx2 *bp)
3952{
3953 int i;
3954
3955 if (bp->rx_buf_ring == NULL)
3956 return;
3957
Michael Chan13daffa2006-03-20 17:49:20 -08003958 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003959 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3960 struct sk_buff *skb = rx_buf->skb;
3961
Michael Chan05d0f1c2005-11-04 08:53:48 -08003962 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003963 continue;
3964
3965 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3966 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3967
3968 rx_buf->skb = NULL;
3969
Michael Chan745720e2006-06-29 12:37:41 -07003970 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003971 }
3972}
3973
3974static void
3975bnx2_free_skbs(struct bnx2 *bp)
3976{
3977 bnx2_free_tx_skbs(bp);
3978 bnx2_free_rx_skbs(bp);
3979}
3980
3981static int
3982bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3983{
3984 int rc;
3985
3986 rc = bnx2_reset_chip(bp, reset_code);
3987 bnx2_free_skbs(bp);
3988 if (rc)
3989 return rc;
3990
Michael Chanfba9fe92006-06-12 22:21:25 -07003991 if ((rc = bnx2_init_chip(bp)) != 0)
3992 return rc;
3993
Michael Chanb6016b72005-05-26 13:03:09 -07003994 bnx2_init_tx_ring(bp);
3995 bnx2_init_rx_ring(bp);
3996 return 0;
3997}
3998
3999static int
4000bnx2_init_nic(struct bnx2 *bp)
4001{
4002 int rc;
4003
4004 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4005 return rc;
4006
Michael Chan80be4432006-11-19 14:07:28 -08004007 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004008 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004009 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004010 bnx2_set_link(bp);
4011 return 0;
4012}
4013
4014static int
4015bnx2_test_registers(struct bnx2 *bp)
4016{
4017 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004018 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004019 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004020 u16 offset;
4021 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004022#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004023 u32 rw_mask;
4024 u32 ro_mask;
4025 } reg_tbl[] = {
4026 { 0x006c, 0, 0x00000000, 0x0000003f },
4027 { 0x0090, 0, 0xffffffff, 0x00000000 },
4028 { 0x0094, 0, 0x00000000, 0x00000000 },
4029
Michael Chan5bae30c2007-05-03 13:18:46 -07004030 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4031 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4032 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4033 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4034 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4035 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4036 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4037 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4038 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004039
Michael Chan5bae30c2007-05-03 13:18:46 -07004040 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4041 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4042 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4043 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4044 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4045 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004046
Michael Chan5bae30c2007-05-03 13:18:46 -07004047 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4048 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4049 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004050
4051 { 0x1000, 0, 0x00000000, 0x00000001 },
4052 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004053
4054 { 0x1408, 0, 0x01c00800, 0x00000000 },
4055 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4056 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004057 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004058 { 0x14b0, 0, 0x00000002, 0x00000001 },
4059 { 0x14b8, 0, 0x00000000, 0x00000000 },
4060 { 0x14c0, 0, 0x00000000, 0x00000009 },
4061 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4062 { 0x14cc, 0, 0x00000000, 0x00000001 },
4063 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004064
4065 { 0x1800, 0, 0x00000000, 0x00000001 },
4066 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004067
4068 { 0x2800, 0, 0x00000000, 0x00000001 },
4069 { 0x2804, 0, 0x00000000, 0x00003f01 },
4070 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4071 { 0x2810, 0, 0xffff0000, 0x00000000 },
4072 { 0x2814, 0, 0xffff0000, 0x00000000 },
4073 { 0x2818, 0, 0xffff0000, 0x00000000 },
4074 { 0x281c, 0, 0xffff0000, 0x00000000 },
4075 { 0x2834, 0, 0xffffffff, 0x00000000 },
4076 { 0x2840, 0, 0x00000000, 0xffffffff },
4077 { 0x2844, 0, 0x00000000, 0xffffffff },
4078 { 0x2848, 0, 0xffffffff, 0x00000000 },
4079 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4080
4081 { 0x2c00, 0, 0x00000000, 0x00000011 },
4082 { 0x2c04, 0, 0x00000000, 0x00030007 },
4083
Michael Chanb6016b72005-05-26 13:03:09 -07004084 { 0x3c00, 0, 0x00000000, 0x00000001 },
4085 { 0x3c04, 0, 0x00000000, 0x00070000 },
4086 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4087 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4088 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4089 { 0x3c14, 0, 0x00000000, 0xffffffff },
4090 { 0x3c18, 0, 0x00000000, 0xffffffff },
4091 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4092 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004093
4094 { 0x5004, 0, 0x00000000, 0x0000007f },
4095 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004096
Michael Chanb6016b72005-05-26 13:03:09 -07004097 { 0x5c00, 0, 0x00000000, 0x00000001 },
4098 { 0x5c04, 0, 0x00000000, 0x0003000f },
4099 { 0x5c08, 0, 0x00000003, 0x00000000 },
4100 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4101 { 0x5c10, 0, 0x00000000, 0xffffffff },
4102 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4103 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4104 { 0x5c88, 0, 0x00000000, 0x00077373 },
4105 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4106
4107 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4108 { 0x680c, 0, 0xffffffff, 0x00000000 },
4109 { 0x6810, 0, 0xffffffff, 0x00000000 },
4110 { 0x6814, 0, 0xffffffff, 0x00000000 },
4111 { 0x6818, 0, 0xffffffff, 0x00000000 },
4112 { 0x681c, 0, 0xffffffff, 0x00000000 },
4113 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4114 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4115 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4116 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4117 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4118 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4119 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4120 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4121 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4122 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4123 { 0x684c, 0, 0xffffffff, 0x00000000 },
4124 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4125 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4126 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4127 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4128 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4129 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4130
4131 { 0xffff, 0, 0x00000000, 0x00000000 },
4132 };
4133
4134 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004135 is_5709 = 0;
4136 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4137 is_5709 = 1;
4138
Michael Chanb6016b72005-05-26 13:03:09 -07004139 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4140 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004141 u16 flags = reg_tbl[i].flags;
4142
4143 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4144 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004145
4146 offset = (u32) reg_tbl[i].offset;
4147 rw_mask = reg_tbl[i].rw_mask;
4148 ro_mask = reg_tbl[i].ro_mask;
4149
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004150 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004151
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004152 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004153
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004154 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004155 if ((val & rw_mask) != 0) {
4156 goto reg_test_err;
4157 }
4158
4159 if ((val & ro_mask) != (save_val & ro_mask)) {
4160 goto reg_test_err;
4161 }
4162
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004163 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004164
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004165 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004166 if ((val & rw_mask) != rw_mask) {
4167 goto reg_test_err;
4168 }
4169
4170 if ((val & ro_mask) != (save_val & ro_mask)) {
4171 goto reg_test_err;
4172 }
4173
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004174 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004175 continue;
4176
4177reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004178 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004179 ret = -ENODEV;
4180 break;
4181 }
4182 return ret;
4183}
4184
4185static int
4186bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4187{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004188 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004189 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4190 int i;
4191
4192 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4193 u32 offset;
4194
4195 for (offset = 0; offset < size; offset += 4) {
4196
4197 REG_WR_IND(bp, start + offset, test_pattern[i]);
4198
4199 if (REG_RD_IND(bp, start + offset) !=
4200 test_pattern[i]) {
4201 return -ENODEV;
4202 }
4203 }
4204 }
4205 return 0;
4206}
4207
4208static int
4209bnx2_test_memory(struct bnx2 *bp)
4210{
4211 int ret = 0;
4212 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004213 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004214 u32 offset;
4215 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004216 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004217 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004218 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004219 { 0xe0000, 0x4000 },
4220 { 0x120000, 0x4000 },
4221 { 0x1a0000, 0x4000 },
4222 { 0x160000, 0x4000 },
4223 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004224 },
4225 mem_tbl_5709[] = {
4226 { 0x60000, 0x4000 },
4227 { 0xa0000, 0x3000 },
4228 { 0xe0000, 0x4000 },
4229 { 0x120000, 0x4000 },
4230 { 0x1a0000, 0x4000 },
4231 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004232 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004233 struct mem_entry *mem_tbl;
4234
4235 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4236 mem_tbl = mem_tbl_5709;
4237 else
4238 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004239
4240 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4241 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4242 mem_tbl[i].len)) != 0) {
4243 return ret;
4244 }
4245 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004246
Michael Chanb6016b72005-05-26 13:03:09 -07004247 return ret;
4248}
4249
Michael Chanbc5a0692006-01-23 16:13:22 -08004250#define BNX2_MAC_LOOPBACK 0
4251#define BNX2_PHY_LOOPBACK 1
4252
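/* The loopback test builds a 1514-byte frame, queues it on the TX ring,
 * forces a coalescing pass without interrupts, and then verifies that
 * exactly one frame came back on the RX ring with no l2_fhdr errors and an
 * unmodified payload.
 */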
Michael Chanb6016b72005-05-26 13:03:09 -07004253static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004254bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004255{
4256 unsigned int pkt_size, num_pkts, i;
4257 struct sk_buff *skb, *rx_skb;
4258 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004259 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004260 dma_addr_t map;
4261 struct tx_bd *txbd;
4262 struct sw_bd *rx_buf;
4263 struct l2_fhdr *rx_hdr;
4264 int ret = -ENODEV;
4265
Michael Chanbc5a0692006-01-23 16:13:22 -08004266 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4267 bp->loopback = MAC_LOOPBACK;
4268 bnx2_set_mac_loopback(bp);
4269 }
4270 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004271 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004272 bnx2_set_phy_loopback(bp);
4273 }
4274 else
4275 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004276
4277 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004278 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004279 if (!skb)
4280 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004281 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004282 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004283 memset(packet + 6, 0x0, 8);
4284 for (i = 14; i < pkt_size; i++)
4285 packet[i] = (unsigned char) (i & 0xff);
4286
4287 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4288 PCI_DMA_TODEVICE);
4289
Michael Chanbf5295b2006-03-23 01:11:56 -08004290 REG_WR(bp, BNX2_HC_COMMAND,
4291 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4292
Michael Chanb6016b72005-05-26 13:03:09 -07004293 REG_RD(bp, BNX2_HC_COMMAND);
4294
4295 udelay(5);
4296 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4297
Michael Chanb6016b72005-05-26 13:03:09 -07004298 num_pkts = 0;
4299
Michael Chanbc5a0692006-01-23 16:13:22 -08004300 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004301
4302 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4303 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4304 txbd->tx_bd_mss_nbytes = pkt_size;
4305 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4306
4307 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004308 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4309 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004310
Michael Chan234754d2006-11-19 14:11:41 -08004311 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4312 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004313
4314 udelay(100);
4315
Michael Chanbf5295b2006-03-23 01:11:56 -08004316 REG_WR(bp, BNX2_HC_COMMAND,
4317 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4318
Michael Chanb6016b72005-05-26 13:03:09 -07004319 REG_RD(bp, BNX2_HC_COMMAND);
4320
4321 udelay(5);
4322
4323 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004324 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004325
Michael Chanbc5a0692006-01-23 16:13:22 -08004326 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004327 goto loopback_test_done;
4328 }
4329
4330 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4331 if (rx_idx != rx_start_idx + num_pkts) {
4332 goto loopback_test_done;
4333 }
4334
4335 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4336 rx_skb = rx_buf->skb;
4337
4338 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4339 skb_reserve(rx_skb, bp->rx_offset);
4340
4341 pci_dma_sync_single_for_cpu(bp->pdev,
4342 pci_unmap_addr(rx_buf, mapping),
4343 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4344
Michael Chanade2bfe2006-01-23 16:09:51 -08004345 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004346 (L2_FHDR_ERRORS_BAD_CRC |
4347 L2_FHDR_ERRORS_PHY_DECODE |
4348 L2_FHDR_ERRORS_ALIGNMENT |
4349 L2_FHDR_ERRORS_TOO_SHORT |
4350 L2_FHDR_ERRORS_GIANT_FRAME)) {
4351
4352 goto loopback_test_done;
4353 }
4354
4355 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4356 goto loopback_test_done;
4357 }
4358
4359 for (i = 14; i < pkt_size; i++) {
4360 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4361 goto loopback_test_done;
4362 }
4363 }
4364
4365 ret = 0;
4366
4367loopback_test_done:
4368 bp->loopback = 0;
4369 return ret;
4370}
4371
Michael Chanbc5a0692006-01-23 16:13:22 -08004372#define BNX2_MAC_LOOPBACK_FAILED 1
4373#define BNX2_PHY_LOOPBACK_FAILED 2
4374#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4375 BNX2_PHY_LOOPBACK_FAILED)
4376
4377static int
4378bnx2_test_loopback(struct bnx2 *bp)
4379{
4380 int rc = 0;
4381
4382 if (!netif_running(bp->dev))
4383 return BNX2_LOOPBACK_FAILED;
4384
4385 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4386 spin_lock_bh(&bp->phy_lock);
4387 bnx2_init_phy(bp);
4388 spin_unlock_bh(&bp->phy_lock);
4389 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4390 rc |= BNX2_MAC_LOOPBACK_FAILED;
4391 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4392 rc |= BNX2_PHY_LOOPBACK_FAILED;
4393 return rc;
4394}
4395
Michael Chanb6016b72005-05-26 13:03:09 -07004396#define NVRAM_SIZE 0x200
4397#define CRC32_RESIDUAL 0xdebb20e3
4398
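/* The NVRAM test checks the magic word (0x669955aa) at offset 0 and then
 * verifies that each of the two 0x100-byte blocks starting at offset 0x100
 * yields the expected CRC32 residual.
 */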
4399static int
4400bnx2_test_nvram(struct bnx2 *bp)
4401{
4402 u32 buf[NVRAM_SIZE / 4];
4403 u8 *data = (u8 *) buf;
4404 int rc = 0;
4405 u32 magic, csum;
4406
4407 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4408 goto test_nvram_done;
4409
4410 magic = be32_to_cpu(buf[0]);
4411 if (magic != 0x669955aa) {
4412 rc = -ENODEV;
4413 goto test_nvram_done;
4414 }
4415
4416 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4417 goto test_nvram_done;
4418
4419 csum = ether_crc_le(0x100, data);
4420 if (csum != CRC32_RESIDUAL) {
4421 rc = -ENODEV;
4422 goto test_nvram_done;
4423 }
4424
4425 csum = ether_crc_le(0x100, data + 0x100);
4426 if (csum != CRC32_RESIDUAL) {
4427 rc = -ENODEV;
4428 }
4429
4430test_nvram_done:
4431 return rc;
4432}
4433
4434static int
4435bnx2_test_link(struct bnx2 *bp)
4436{
4437 u32 bmsr;
4438
Michael Chanc770a652005-08-25 15:38:39 -07004439 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004440 bnx2_enable_bmsr1(bp);
4441 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4442 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4443 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004444 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004445
Michael Chanb6016b72005-05-26 13:03:09 -07004446 if (bmsr & BMSR_LSTATUS) {
4447 return 0;
4448 }
4449 return -ENODEV;
4450}
4451
4452static int
4453bnx2_test_intr(struct bnx2 *bp)
4454{
4455 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004456 u16 status_idx;
4457
4458 if (!netif_running(bp->dev))
4459 return -ENODEV;
4460
4461 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4462
4463 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004464 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004465 REG_RD(bp, BNX2_HC_COMMAND);
4466
4467 for (i = 0; i < 10; i++) {
4468 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4469 status_idx) {
4470
4471 break;
4472 }
4473
4474 msleep_interruptible(10);
4475 }
4476 if (i < 10)
4477 return 0;
4478
4479 return -ENODEV;
4480}
4481
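/* The SerDes timers below implement parallel detection.  On the 5706, if
 * autoneg has not completed but a signal is detected without config codes,
 * the link is forced to 1Gb/s full duplex and autoneg is re-enabled once
 * config codes appear.  The 5708 variant alternates between forced 2.5Gb/s
 * and autoneg while the link stays down.
 */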
4482static void
Michael Chan48b01e22006-11-19 14:08:00 -08004483bnx2_5706_serdes_timer(struct bnx2 *bp)
4484{
4485 spin_lock(&bp->phy_lock);
4486 if (bp->serdes_an_pending)
4487 bp->serdes_an_pending--;
4488 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4489 u32 bmcr;
4490
4491 bp->current_interval = bp->timer_interval;
4492
Michael Chanca58c3a2007-05-03 13:22:52 -07004493 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004494
4495 if (bmcr & BMCR_ANENABLE) {
4496 u32 phy1, phy2;
4497
4498 bnx2_write_phy(bp, 0x1c, 0x7c00);
4499 bnx2_read_phy(bp, 0x1c, &phy1);
4500
4501 bnx2_write_phy(bp, 0x17, 0x0f01);
4502 bnx2_read_phy(bp, 0x15, &phy2);
4503 bnx2_write_phy(bp, 0x17, 0x0f01);
4504 bnx2_read_phy(bp, 0x15, &phy2);
4505
4506 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4507 !(phy2 & 0x20)) { /* no CONFIG */
4508
4509 bmcr &= ~BMCR_ANENABLE;
4510 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004511 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004512 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4513 }
4514 }
4515 }
4516 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4517 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4518 u32 phy2;
4519
4520 bnx2_write_phy(bp, 0x17, 0x0f01);
4521 bnx2_read_phy(bp, 0x15, &phy2);
4522 if (phy2 & 0x20) {
4523 u32 bmcr;
4524
Michael Chanca58c3a2007-05-03 13:22:52 -07004525 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004526 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004527 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004528
4529 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4530 }
4531 } else
4532 bp->current_interval = bp->timer_interval;
4533
4534 spin_unlock(&bp->phy_lock);
4535}
4536
4537static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004538bnx2_5708_serdes_timer(struct bnx2 *bp)
4539{
4540 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4541 bp->serdes_an_pending = 0;
4542 return;
4543 }
4544
4545 spin_lock(&bp->phy_lock);
4546 if (bp->serdes_an_pending)
4547 bp->serdes_an_pending--;
4548 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4549 u32 bmcr;
4550
Michael Chanca58c3a2007-05-03 13:22:52 -07004551 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004552 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004553 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004554 bp->current_interval = SERDES_FORCED_TIMEOUT;
4555 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004556 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004557 bp->serdes_an_pending = 2;
4558 bp->current_interval = bp->timer_interval;
4559 }
4560
4561 } else
4562 bp->current_interval = bp->timer_interval;
4563
4564 spin_unlock(&bp->phy_lock);
4565}
4566
4567static void
Michael Chanb6016b72005-05-26 13:03:09 -07004568bnx2_timer(unsigned long data)
4569{
4570 struct bnx2 *bp = (struct bnx2 *) data;
4571 u32 msg;
4572
Michael Chancd339a02005-08-25 15:35:24 -07004573 if (!netif_running(bp->dev))
4574 return;
4575
Michael Chanb6016b72005-05-26 13:03:09 -07004576 if (atomic_read(&bp->intr_sem) != 0)
4577 goto bnx2_restart_timer;
4578
4579 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004580 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004581
Michael Chancea94db2006-06-12 22:16:13 -07004582 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4583
Michael Chanf8dd0642006-11-19 14:08:29 -08004584 if (bp->phy_flags & PHY_SERDES_FLAG) {
4585 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4586 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004587 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004588 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004589 }
4590
4591bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004592 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004593}
4594
4595/* Called with rtnl_lock */
4596static int
4597bnx2_open(struct net_device *dev)
4598{
Michael Chan972ec0d2006-01-23 16:12:43 -08004599 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004600 int rc;
4601
Michael Chan1b2f9222007-05-03 13:20:19 -07004602 netif_carrier_off(dev);
4603
Pavel Machek829ca9a2005-09-03 15:56:56 -07004604 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004605 bnx2_disable_int(bp);
4606
4607 rc = bnx2_alloc_mem(bp);
4608 if (rc)
4609 return rc;
4610
4611 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4612 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4613 !disable_msi) {
4614
4615 if (pci_enable_msi(bp->pdev) == 0) {
4616 bp->flags |= USING_MSI_FLAG;
4617 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4618 dev);
4619 }
4620 else {
4621 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004622 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004623 }
4624 }
4625 else {
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004626 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
Michael Chanb6016b72005-05-26 13:03:09 -07004627 dev->name, dev);
4628 }
4629 if (rc) {
4630 bnx2_free_mem(bp);
4631 return rc;
4632 }
4633
4634 rc = bnx2_init_nic(bp);
4635
4636 if (rc) {
4637 free_irq(bp->pdev->irq, dev);
4638 if (bp->flags & USING_MSI_FLAG) {
4639 pci_disable_msi(bp->pdev);
4640 bp->flags &= ~USING_MSI_FLAG;
4641 }
4642 bnx2_free_skbs(bp);
4643 bnx2_free_mem(bp);
4644 return rc;
4645 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004646
Michael Chancd339a02005-08-25 15:35:24 -07004647 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004648
4649 atomic_set(&bp->intr_sem, 0);
4650
4651 bnx2_enable_int(bp);
4652
4653 if (bp->flags & USING_MSI_FLAG) {
4654		/* Test MSI to make sure it is working.
4655		 * If the MSI test fails, go back to INTx mode.
4656 */
4657 if (bnx2_test_intr(bp) != 0) {
4658 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4659 " using MSI, switching to INTx mode. Please"
4660 " report this failure to the PCI maintainer"
4661 " and include system chipset information.\n",
4662 bp->dev->name);
4663
4664 bnx2_disable_int(bp);
4665 free_irq(bp->pdev->irq, dev);
4666 pci_disable_msi(bp->pdev);
4667 bp->flags &= ~USING_MSI_FLAG;
4668
4669 rc = bnx2_init_nic(bp);
4670
4671 if (!rc) {
4672 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004673 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004674 }
4675 if (rc) {
4676 bnx2_free_skbs(bp);
4677 bnx2_free_mem(bp);
4678 del_timer_sync(&bp->timer);
4679 return rc;
4680 }
4681 bnx2_enable_int(bp);
4682 }
4683 }
4684 if (bp->flags & USING_MSI_FLAG) {
4685 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4686 }
4687
4688 netif_start_queue(dev);
4689
4690 return 0;
4691}
4692
4693static void
David Howellsc4028952006-11-22 14:57:56 +00004694bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004695{
David Howellsc4028952006-11-22 14:57:56 +00004696 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004697
Michael Chanafdc08b2005-08-25 15:34:29 -07004698 if (!netif_running(bp->dev))
4699 return;
4700
4701 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004702 bnx2_netif_stop(bp);
4703
4704 bnx2_init_nic(bp);
4705
4706 atomic_set(&bp->intr_sem, 1);
4707 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004708 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004709}
4710
4711static void
4712bnx2_tx_timeout(struct net_device *dev)
4713{
Michael Chan972ec0d2006-01-23 16:12:43 -08004714 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004715
4716	/* This allows the netif to be shut down gracefully before resetting */
4717 schedule_work(&bp->reset_task);
4718}
4719
4720#ifdef BCM_VLAN
4721/* Called with rtnl_lock */
4722static void
4723bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4724{
Michael Chan972ec0d2006-01-23 16:12:43 -08004725 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004726
4727 bnx2_netif_stop(bp);
4728
4729 bp->vlgrp = vlgrp;
4730 bnx2_set_rx_mode(dev);
4731
4732 bnx2_netif_start(bp);
4733}
4734
4735/* Called with rtnl_lock */
4736static void
4737bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4738{
Michael Chan972ec0d2006-01-23 16:12:43 -08004739 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004740
4741 bnx2_netif_stop(bp);
Dan Aloni5c15bde2007-03-02 20:44:51 -08004742 vlan_group_set_device(bp->vlgrp, vid, NULL);
Michael Chanb6016b72005-05-26 13:03:09 -07004743 bnx2_set_rx_mode(dev);
4744
4745 bnx2_netif_start(bp);
4746}
4747#endif
4748
Herbert Xu932ff272006-06-09 12:20:56 -07004749/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004750 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4751 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004752 */
4753static int
4754bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4755{
Michael Chan972ec0d2006-01-23 16:12:43 -08004756 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004757 dma_addr_t mapping;
4758 struct tx_bd *txbd;
4759 struct sw_bd *tx_buf;
4760 u32 len, vlan_tag_flags, last_frag, mss;
4761 u16 prod, ring_prod;
4762 int i;
4763
Michael Chane89bbf12005-08-25 15:36:58 -07004764 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004765 netif_stop_queue(dev);
4766 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4767 dev->name);
4768
4769 return NETDEV_TX_BUSY;
4770 }
4771 len = skb_headlen(skb);
4772 prod = bp->tx_prod;
4773 ring_prod = TX_RING_IDX(prod);
4774
4775 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004776 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004777 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4778 }
4779
4780 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4781 vlan_tag_flags |=
4782 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4783 }
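	/* TSO setup: for GSO packets larger than the MTU, set the LSO flags.
	 * TCPv6 packets encode the transport header offset into the BD flags
	 * and mss fields; TCPv4 packets have the IP checksum cleared, the TCP
	 * checksum seeded with the pseudo-header, and the IP/TCP header
	 * lengths encoded into the BD flags.
	 */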
Herbert Xu79671682006-06-22 02:40:14 -07004784 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004785 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4786 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004787 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004788
Michael Chanb6016b72005-05-26 13:03:09 -07004789 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4790
Michael Chan4666f872007-05-03 13:22:28 -07004791 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004792
Michael Chan4666f872007-05-03 13:22:28 -07004793 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4794 u32 tcp_off = skb_transport_offset(skb) -
4795 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004796
Michael Chan4666f872007-05-03 13:22:28 -07004797 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4798 TX_BD_FLAGS_SW_FLAGS;
4799 if (likely(tcp_off == 0))
4800 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4801 else {
4802 tcp_off >>= 3;
4803 vlan_tag_flags |= ((tcp_off & 0x3) <<
4804 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4805 ((tcp_off & 0x10) <<
4806 TX_BD_FLAGS_TCP6_OFF4_SHL);
4807 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4808 }
4809 } else {
4810 if (skb_header_cloned(skb) &&
4811 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4812 dev_kfree_skb(skb);
4813 return NETDEV_TX_OK;
4814 }
4815
4816 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4817
4818 iph = ip_hdr(skb);
4819 iph->check = 0;
4820 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4821 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4822 iph->daddr, 0,
4823 IPPROTO_TCP,
4824 0);
4825 if (tcp_opt_len || (iph->ihl > 5)) {
4826 vlan_tag_flags |= ((iph->ihl - 5) +
4827 (tcp_opt_len >> 2)) << 8;
4828 }
Michael Chanb6016b72005-05-26 13:03:09 -07004829 }
Michael Chan4666f872007-05-03 13:22:28 -07004830 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004831 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004832
4833 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004834
Michael Chanb6016b72005-05-26 13:03:09 -07004835 tx_buf = &bp->tx_buf_ring[ring_prod];
4836 tx_buf->skb = skb;
4837 pci_unmap_addr_set(tx_buf, mapping, mapping);
4838
4839 txbd = &bp->tx_desc_ring[ring_prod];
4840
4841 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4842 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4843 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4844 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4845
4846 last_frag = skb_shinfo(skb)->nr_frags;
4847
4848 for (i = 0; i < last_frag; i++) {
4849 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4850
4851 prod = NEXT_TX_BD(prod);
4852 ring_prod = TX_RING_IDX(prod);
4853 txbd = &bp->tx_desc_ring[ring_prod];
4854
4855 len = frag->size;
4856 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4857 len, PCI_DMA_TODEVICE);
4858 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4859 mapping, mapping);
4860
4861 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4862 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4863 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4864 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4865
4866 }
4867 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4868
4869 prod = NEXT_TX_BD(prod);
4870 bp->tx_prod_bseq += skb->len;
4871
Michael Chan234754d2006-11-19 14:11:41 -08004872 REG_WR16(bp, bp->tx_bidx_addr, prod);
4873 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004874
4875 mmiowb();
4876
4877 bp->tx_prod = prod;
4878 dev->trans_start = jiffies;
4879
Michael Chane89bbf12005-08-25 15:36:58 -07004880 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004881 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004882 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004883 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004884 }
4885
4886 return NETDEV_TX_OK;
4887}
4888
4889/* Called with rtnl_lock */
4890static int
4891bnx2_close(struct net_device *dev)
4892{
Michael Chan972ec0d2006-01-23 16:12:43 -08004893 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004894 u32 reset_code;
4895
Michael Chanafdc08b2005-08-25 15:34:29 -07004896 /* Calling flush_scheduled_work() may deadlock because
4897 * linkwatch_event() may be on the workqueue and it will try to get
4898 * the rtnl_lock which we are holding.
4899 */
4900 while (bp->in_reset_task)
4901 msleep(1);
4902
Michael Chanb6016b72005-05-26 13:03:09 -07004903 bnx2_netif_stop(bp);
4904 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004905 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004906 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004907 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004908 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4909 else
4910 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4911 bnx2_reset_chip(bp, reset_code);
4912 free_irq(bp->pdev->irq, dev);
4913 if (bp->flags & USING_MSI_FLAG) {
4914 pci_disable_msi(bp->pdev);
4915 bp->flags &= ~USING_MSI_FLAG;
4916 }
4917 bnx2_free_skbs(bp);
4918 bnx2_free_mem(bp);
4919 bp->link_up = 0;
4920 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004921 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004922 return 0;
4923}
4924
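/* 64-bit hardware counters are kept as hi/lo register pairs; 64-bit hosts
 * reconstruct the full value while 32-bit hosts report only the low word.
 */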
4925#define GET_NET_STATS64(ctr) \
4926 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4927 (unsigned long) (ctr##_lo)
4928
4929#define GET_NET_STATS32(ctr) \
4930 (ctr##_lo)
4931
4932#if (BITS_PER_LONG == 64)
4933#define GET_NET_STATS GET_NET_STATS64
4934#else
4935#define GET_NET_STATS GET_NET_STATS32
4936#endif
4937
4938static struct net_device_stats *
4939bnx2_get_stats(struct net_device *dev)
4940{
Michael Chan972ec0d2006-01-23 16:12:43 -08004941 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004942 struct statistics_block *stats_blk = bp->stats_blk;
4943 struct net_device_stats *net_stats = &bp->net_stats;
4944
4945 if (bp->stats_blk == NULL) {
4946 return net_stats;
4947 }
4948 net_stats->rx_packets =
4949 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4950 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4951 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4952
4953 net_stats->tx_packets =
4954 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4955 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4956 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4957
4958 net_stats->rx_bytes =
4959 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4960
4961 net_stats->tx_bytes =
4962 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4963
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004964 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004965 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4966
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004967 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004968 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4969
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004970 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004971 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4972 stats_blk->stat_EtherStatsOverrsizePkts);
4973
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004974 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004975 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4976
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004977 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004978 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4979
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004980 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004981 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4982
4983 net_stats->rx_errors = net_stats->rx_length_errors +
4984 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4985 net_stats->rx_crc_errors;
4986
4987 net_stats->tx_aborted_errors =
4988 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4989 stats_blk->stat_Dot3StatsLateCollisions);
4990
Michael Chan5b0c76a2005-11-04 08:45:49 -08004991 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4992 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07004993 net_stats->tx_carrier_errors = 0;
4994 else {
4995 net_stats->tx_carrier_errors =
4996 (unsigned long)
4997 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4998 }
4999
5000 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005001 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005002 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5003 +
5004 net_stats->tx_aborted_errors +
5005 net_stats->tx_carrier_errors;
5006
Michael Chancea94db2006-06-12 22:16:13 -07005007 net_stats->rx_missed_errors =
5008 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5009 stats_blk->stat_FwRxDrop);
5010
Michael Chanb6016b72005-05-26 13:03:09 -07005011 return net_stats;
5012}
5013
5014/* All ethtool functions called with rtnl_lock */
5015
5016static int
5017bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5018{
Michael Chan972ec0d2006-01-23 16:12:43 -08005019 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005020
5021 cmd->supported = SUPPORTED_Autoneg;
5022 if (bp->phy_flags & PHY_SERDES_FLAG) {
5023 cmd->supported |= SUPPORTED_1000baseT_Full |
5024 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005025 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5026 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005027
5028 cmd->port = PORT_FIBRE;
5029 }
5030 else {
5031 cmd->supported |= SUPPORTED_10baseT_Half |
5032 SUPPORTED_10baseT_Full |
5033 SUPPORTED_100baseT_Half |
5034 SUPPORTED_100baseT_Full |
5035 SUPPORTED_1000baseT_Full |
5036 SUPPORTED_TP;
5037
5038 cmd->port = PORT_TP;
5039 }
5040
5041 cmd->advertising = bp->advertising;
5042
5043 if (bp->autoneg & AUTONEG_SPEED) {
5044 cmd->autoneg = AUTONEG_ENABLE;
5045 }
5046 else {
5047 cmd->autoneg = AUTONEG_DISABLE;
5048 }
5049
5050 if (netif_carrier_ok(dev)) {
5051 cmd->speed = bp->line_speed;
5052 cmd->duplex = bp->duplex;
5053 }
5054 else {
5055 cmd->speed = -1;
5056 cmd->duplex = -1;
5057 }
5058
5059 cmd->transceiver = XCVR_INTERNAL;
5060 cmd->phy_address = bp->phy_addr;
5061
5062 return 0;
5063}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005064
Michael Chanb6016b72005-05-26 13:03:09 -07005065static int
5066bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5067{
Michael Chan972ec0d2006-01-23 16:12:43 -08005068 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005069 u8 autoneg = bp->autoneg;
5070 u8 req_duplex = bp->req_duplex;
5071 u16 req_line_speed = bp->req_line_speed;
5072 u32 advertising = bp->advertising;
5073
5074 if (cmd->autoneg == AUTONEG_ENABLE) {
5075 autoneg |= AUTONEG_SPEED;
5076
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005077 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005078
5079 /* allow advertising 1 speed */
5080 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5081 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5082 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5083 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5084
5085 if (bp->phy_flags & PHY_SERDES_FLAG)
5086 return -EINVAL;
5087
5088 advertising = cmd->advertising;
5089
Michael Chan27a005b2007-05-03 13:23:41 -07005090 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5091 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5092 return -EINVAL;
5093 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005094 advertising = cmd->advertising;
5095 }
5096 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5097 return -EINVAL;
5098 }
5099 else {
5100 if (bp->phy_flags & PHY_SERDES_FLAG) {
5101 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5102 }
5103 else {
5104 advertising = ETHTOOL_ALL_COPPER_SPEED;
5105 }
5106 }
5107 advertising |= ADVERTISED_Autoneg;
5108 }
5109 else {
5110 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005111 if ((cmd->speed != SPEED_1000 &&
5112 cmd->speed != SPEED_2500) ||
5113 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005114 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005115
5116 if (cmd->speed == SPEED_2500 &&
5117 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5118 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005119 }
5120 else if (cmd->speed == SPEED_1000) {
5121 return -EINVAL;
5122 }
5123 autoneg &= ~AUTONEG_SPEED;
5124 req_line_speed = cmd->speed;
5125 req_duplex = cmd->duplex;
5126 advertising = 0;
5127 }
5128
5129 bp->autoneg = autoneg;
5130 bp->advertising = advertising;
5131 bp->req_line_speed = req_line_speed;
5132 bp->req_duplex = req_duplex;
5133
Michael Chanc770a652005-08-25 15:38:39 -07005134 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005135
5136 bnx2_setup_phy(bp);
5137
Michael Chanc770a652005-08-25 15:38:39 -07005138 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005139
5140 return 0;
5141}
5142
5143static void
5144bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5145{
Michael Chan972ec0d2006-01-23 16:12:43 -08005146 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005147
5148 strcpy(info->driver, DRV_MODULE_NAME);
5149 strcpy(info->version, DRV_MODULE_VERSION);
5150 strcpy(info->bus_info, pci_name(bp->pdev));
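	/* bp->fw_ver packs the bootcode revision one byte per field; render
	 * it as the "major.minor.fix" string reported by ethtool.
	 */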
5151 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5152 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5153 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005154 info->fw_version[1] = info->fw_version[3] = '.';
5155 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005156}
5157
Michael Chan244ac4f2006-03-20 17:48:46 -08005158#define BNX2_REGDUMP_LEN (32 * 1024)
5159
5160static int
5161bnx2_get_regs_len(struct net_device *dev)
5162{
5163 return BNX2_REGDUMP_LEN;
5164}
5165
5166static void
5167bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5168{
5169 u32 *p = _p, i, offset;
5170 u8 *orig_p = _p;
5171 struct bnx2 *bp = netdev_priv(dev);
5172 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5173 0x0800, 0x0880, 0x0c00, 0x0c10,
5174 0x0c30, 0x0d08, 0x1000, 0x101c,
5175 0x1040, 0x1048, 0x1080, 0x10a4,
5176 0x1400, 0x1490, 0x1498, 0x14f0,
5177 0x1500, 0x155c, 0x1580, 0x15dc,
5178 0x1600, 0x1658, 0x1680, 0x16d8,
5179 0x1800, 0x1820, 0x1840, 0x1854,
5180 0x1880, 0x1894, 0x1900, 0x1984,
5181 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5182 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5183 0x2000, 0x2030, 0x23c0, 0x2400,
5184 0x2800, 0x2820, 0x2830, 0x2850,
5185 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5186 0x3c00, 0x3c94, 0x4000, 0x4010,
5187 0x4080, 0x4090, 0x43c0, 0x4458,
5188 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5189 0x4fc0, 0x5010, 0x53c0, 0x5444,
5190 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5191 0x5fc0, 0x6000, 0x6400, 0x6428,
5192 0x6800, 0x6848, 0x684c, 0x6860,
5193 0x6888, 0x6910, 0x8000 };
5194
5195 regs->version = 0;
5196
5197 memset(p, 0, BNX2_REGDUMP_LEN);
5198
5199 if (!netif_running(bp->dev))
5200 return;
5201
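	/* reg_boundaries[] holds consecutive [start, end) pairs of readable
	 * register ranges; offsets in the gaps are skipped and stay zeroed
	 * from the memset above.
	 */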
5202 i = 0;
5203 offset = reg_boundaries[0];
5204 p += offset;
5205 while (offset < BNX2_REGDUMP_LEN) {
5206 *p++ = REG_RD(bp, offset);
5207 offset += 4;
5208 if (offset == reg_boundaries[i + 1]) {
5209 offset = reg_boundaries[i + 2];
5210 p = (u32 *) (orig_p + offset);
5211 i += 2;
5212 }
5213 }
5214}
5215
Michael Chanb6016b72005-05-26 13:03:09 -07005216static void
5217bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5218{
Michael Chan972ec0d2006-01-23 16:12:43 -08005219 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005220
5221 if (bp->flags & NO_WOL_FLAG) {
5222 wol->supported = 0;
5223 wol->wolopts = 0;
5224 }
5225 else {
5226 wol->supported = WAKE_MAGIC;
5227 if (bp->wol)
5228 wol->wolopts = WAKE_MAGIC;
5229 else
5230 wol->wolopts = 0;
5231 }
5232 memset(&wol->sopass, 0, sizeof(wol->sopass));
5233}
5234
5235static int
5236bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5237{
Michael Chan972ec0d2006-01-23 16:12:43 -08005238 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005239
5240 if (wol->wolopts & ~WAKE_MAGIC)
5241 return -EINVAL;
5242
5243 if (wol->wolopts & WAKE_MAGIC) {
5244 if (bp->flags & NO_WOL_FLAG)
5245 return -EINVAL;
5246
5247 bp->wol = 1;
5248 }
5249 else {
5250 bp->wol = 0;
5251 }
5252 return 0;
5253}
5254
5255static int
5256bnx2_nway_reset(struct net_device *dev)
5257{
Michael Chan972ec0d2006-01-23 16:12:43 -08005258 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005259 u32 bmcr;
5260
5261 if (!(bp->autoneg & AUTONEG_SPEED)) {
5262 return -EINVAL;
5263 }
5264
Michael Chanc770a652005-08-25 15:38:39 -07005265 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005266
5267 /* Force a link down visible on the other side */
5268 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005269 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005270 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005271
5272 msleep(20);
5273
Michael Chanc770a652005-08-25 15:38:39 -07005274 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005275
5276 bp->current_interval = SERDES_AN_TIMEOUT;
5277 bp->serdes_an_pending = 1;
5278 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005279 }
5280
Michael Chanca58c3a2007-05-03 13:22:52 -07005281 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005282 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005283 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005284
Michael Chanc770a652005-08-25 15:38:39 -07005285 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005286
5287 return 0;
5288}
5289
5290static int
5291bnx2_get_eeprom_len(struct net_device *dev)
5292{
Michael Chan972ec0d2006-01-23 16:12:43 -08005293 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005294
Michael Chan1122db72006-01-23 16:11:42 -08005295 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005296 return 0;
5297
Michael Chan1122db72006-01-23 16:11:42 -08005298 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005299}
5300
5301static int
5302bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5303 u8 *eebuf)
5304{
Michael Chan972ec0d2006-01-23 16:12:43 -08005305 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005306 int rc;
5307
John W. Linville1064e942005-11-10 12:58:24 -08005308 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005309
5310 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5311
5312 return rc;
5313}
5314
5315static int
5316bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5317 u8 *eebuf)
5318{
Michael Chan972ec0d2006-01-23 16:12:43 -08005319 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005320 int rc;
5321
John W. Linville1064e942005-11-10 12:58:24 -08005322 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005323
5324 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5325
5326 return rc;
5327}
5328
5329static int
5330bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5331{
Michael Chan972ec0d2006-01-23 16:12:43 -08005332 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005333
5334 memset(coal, 0, sizeof(struct ethtool_coalesce));
5335
5336 coal->rx_coalesce_usecs = bp->rx_ticks;
5337 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5338 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5339 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5340
5341 coal->tx_coalesce_usecs = bp->tx_ticks;
5342 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5343 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5344 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5345
5346 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5347
5348 return 0;
5349}
5350
5351static int
5352bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5353{
Michael Chan972ec0d2006-01-23 16:12:43 -08005354 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005355
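	/* Clamp each setting to the width of its hardware field: coalescing
	 * tick values are 10 bits, frame-count trip points are 8 bits, and
	 * the statistics interval keeps only bits 8-23.
	 */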
5356 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5357 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5358
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005359 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005360 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5361
5362 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5363 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5364
5365 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5366 if (bp->rx_quick_cons_trip_int > 0xff)
5367 bp->rx_quick_cons_trip_int = 0xff;
5368
5369 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5370 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5371
5372 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5373 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5374
5375 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5376 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5377
5378 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5379 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5380 0xff;
5381
5382 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5383 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5384 bp->stats_ticks &= 0xffff00;
5385
5386 if (netif_running(bp->dev)) {
5387 bnx2_netif_stop(bp);
5388 bnx2_init_nic(bp);
5389 bnx2_netif_start(bp);
5390 }
5391
5392 return 0;
5393}
5394
5395static void
5396bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5397{
Michael Chan972ec0d2006-01-23 16:12:43 -08005398 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005399
Michael Chan13daffa2006-03-20 17:49:20 -08005400 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005401 ering->rx_mini_max_pending = 0;
5402 ering->rx_jumbo_max_pending = 0;
5403
5404 ering->rx_pending = bp->rx_ring_size;
5405 ering->rx_mini_pending = 0;
5406 ering->rx_jumbo_pending = 0;
5407
5408 ering->tx_max_pending = MAX_TX_DESC_CNT;
5409 ering->tx_pending = bp->tx_ring_size;
5410}
5411
5412static int
5413bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5414{
Michael Chan972ec0d2006-01-23 16:12:43 -08005415 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005416
Michael Chan13daffa2006-03-20 17:49:20 -08005417 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005418 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5419 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5420
5421 return -EINVAL;
5422 }
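	/* Changing ring sizes requires a full reinit: quiesce the chip, free
	 * the old skbs and ring memory, then reallocate and restart below.
	 */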
Michael Chan13daffa2006-03-20 17:49:20 -08005423 if (netif_running(bp->dev)) {
5424 bnx2_netif_stop(bp);
5425 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5426 bnx2_free_skbs(bp);
5427 bnx2_free_mem(bp);
5428 }
5429
5430 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005431 bp->tx_ring_size = ering->tx_pending;
5432
5433 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005434 int rc;
5435
5436 rc = bnx2_alloc_mem(bp);
5437 if (rc)
5438 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005439 bnx2_init_nic(bp);
5440 bnx2_netif_start(bp);
5441 }
5442
5443 return 0;
5444}
5445
5446static void
5447bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5448{
Michael Chan972ec0d2006-01-23 16:12:43 -08005449 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005450
5451 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5452 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5453 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5454}
5455
5456static int
5457bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5458{
Michael Chan972ec0d2006-01-23 16:12:43 -08005459 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005460
5461 bp->req_flow_ctrl = 0;
5462 if (epause->rx_pause)
5463 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5464 if (epause->tx_pause)
5465 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5466
5467 if (epause->autoneg) {
5468 bp->autoneg |= AUTONEG_FLOW_CTRL;
5469 }
5470 else {
5471 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5472 }
5473
Michael Chanc770a652005-08-25 15:38:39 -07005474 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005475
5476 bnx2_setup_phy(bp);
5477
Michael Chanc770a652005-08-25 15:38:39 -07005478 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005479
5480 return 0;
5481}
5482
5483static u32
5484bnx2_get_rx_csum(struct net_device *dev)
5485{
Michael Chan972ec0d2006-01-23 16:12:43 -08005486 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005487
5488 return bp->rx_csum;
5489}
5490
5491static int
5492bnx2_set_rx_csum(struct net_device *dev, u32 data)
5493{
Michael Chan972ec0d2006-01-23 16:12:43 -08005494 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005495
5496 bp->rx_csum = data;
5497 return 0;
5498}
5499
Michael Chanb11d6212006-06-29 12:31:21 -07005500static int
5501bnx2_set_tso(struct net_device *dev, u32 data)
5502{
Michael Chan4666f872007-05-03 13:22:28 -07005503 struct bnx2 *bp = netdev_priv(dev);
5504
5505 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005506 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005507 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5508 dev->features |= NETIF_F_TSO6;
5509 } else
5510 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5511 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005512 return 0;
5513}
5514
Michael Chancea94db2006-06-12 22:16:13 -07005515#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005516
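/* The tables below are indexed in parallel: ethtool string names, 32-bit
 * word offsets into the statistics block, and per-chip counter widths
 * (8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = counter skipped).
 */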
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005517static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005518 char string[ETH_GSTRING_LEN];
5519} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5520 { "rx_bytes" },
5521 { "rx_error_bytes" },
5522 { "tx_bytes" },
5523 { "tx_error_bytes" },
5524 { "rx_ucast_packets" },
5525 { "rx_mcast_packets" },
5526 { "rx_bcast_packets" },
5527 { "tx_ucast_packets" },
5528 { "tx_mcast_packets" },
5529 { "tx_bcast_packets" },
5530 { "tx_mac_errors" },
5531 { "tx_carrier_errors" },
5532 { "rx_crc_errors" },
5533 { "rx_align_errors" },
5534 { "tx_single_collisions" },
5535 { "tx_multi_collisions" },
5536 { "tx_deferred" },
5537 { "tx_excess_collisions" },
5538 { "tx_late_collisions" },
5539 { "tx_total_collisions" },
5540 { "rx_fragments" },
5541 { "rx_jabbers" },
5542 { "rx_undersize_packets" },
5543 { "rx_oversize_packets" },
5544 { "rx_64_byte_packets" },
5545 { "rx_65_to_127_byte_packets" },
5546 { "rx_128_to_255_byte_packets" },
5547 { "rx_256_to_511_byte_packets" },
5548 { "rx_512_to_1023_byte_packets" },
5549 { "rx_1024_to_1522_byte_packets" },
5550 { "rx_1523_to_9022_byte_packets" },
5551 { "tx_64_byte_packets" },
5552 { "tx_65_to_127_byte_packets" },
5553 { "tx_128_to_255_byte_packets" },
5554 { "tx_256_to_511_byte_packets" },
5555 { "tx_512_to_1023_byte_packets" },
5556 { "tx_1024_to_1522_byte_packets" },
5557 { "tx_1523_to_9022_byte_packets" },
5558 { "rx_xon_frames" },
5559 { "rx_xoff_frames" },
5560 { "tx_xon_frames" },
5561 { "tx_xoff_frames" },
5562 { "rx_mac_ctrl_frames" },
5563 { "rx_filtered_packets" },
5564 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005565 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005566};
5567
5568#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5569
Arjan van de Venf71e1302006-03-03 21:33:57 -05005570static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005571 STATS_OFFSET32(stat_IfHCInOctets_hi),
5572 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5573 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5574 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5575 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5576 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5577 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5578 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5579 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5580 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5581 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005582 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5583 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5584 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5585 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5586 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5587 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5588 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5589 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5590 STATS_OFFSET32(stat_EtherStatsCollisions),
5591 STATS_OFFSET32(stat_EtherStatsFragments),
5592 STATS_OFFSET32(stat_EtherStatsJabbers),
5593 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5594 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5595 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5596 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5597 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5598 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5599 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5600 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5601 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5602 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5603 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5604 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5605 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5606 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5607 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5608 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5609 STATS_OFFSET32(stat_XonPauseFramesReceived),
5610 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5611 STATS_OFFSET32(stat_OutXonSent),
5612 STATS_OFFSET32(stat_OutXoffSent),
5613 STATS_OFFSET32(stat_MacControlFramesReceived),
5614 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5615 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005616 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005617};
5618
5619/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5620 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005621 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005622static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005623 8,0,8,8,8,8,8,8,8,8,
5624 4,0,4,4,4,4,4,4,4,4,
5625 4,4,4,4,4,4,4,4,4,4,
5626 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005627 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005628};
5629
Michael Chan5b0c76a2005-11-04 08:45:49 -08005630static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5631 8,0,8,8,8,8,8,8,8,8,
5632 4,4,4,4,4,4,4,4,4,4,
5633 4,4,4,4,4,4,4,4,4,4,
5634 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005635 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005636};
5637
Michael Chanb6016b72005-05-26 13:03:09 -07005638#define BNX2_NUM_TESTS 6
5639
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005640static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005641 char string[ETH_GSTRING_LEN];
5642} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5643 { "register_test (offline)" },
5644 { "memory_test (offline)" },
5645 { "loopback_test (offline)" },
5646 { "nvram_test (online)" },
5647 { "interrupt_test (online)" },
5648 { "link_test (online)" },
5649};
5650
5651static int
5652bnx2_self_test_count(struct net_device *dev)
5653{
5654 return BNX2_NUM_TESTS;
5655}
5656
5657static void
5658bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5659{
Michael Chan972ec0d2006-01-23 16:12:43 -08005660 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005661
5662 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5663 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005664 int i;
5665
Michael Chanb6016b72005-05-26 13:03:09 -07005666 bnx2_netif_stop(bp);
5667 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5668 bnx2_free_skbs(bp);
5669
5670 if (bnx2_test_registers(bp) != 0) {
5671 buf[0] = 1;
5672 etest->flags |= ETH_TEST_FL_FAILED;
5673 }
5674 if (bnx2_test_memory(bp) != 0) {
5675 buf[1] = 1;
5676 etest->flags |= ETH_TEST_FL_FAILED;
5677 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005678 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005679 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005680
5681 if (!netif_running(bp->dev)) {
5682 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5683 }
5684 else {
5685 bnx2_init_nic(bp);
5686 bnx2_netif_start(bp);
5687 }
5688
5689 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005690 for (i = 0; i < 7; i++) {
5691 if (bp->link_up)
5692 break;
5693 msleep_interruptible(1000);
5694 }
Michael Chanb6016b72005-05-26 13:03:09 -07005695 }
5696
5697 if (bnx2_test_nvram(bp) != 0) {
5698 buf[3] = 1;
5699 etest->flags |= ETH_TEST_FL_FAILED;
5700 }
5701 if (bnx2_test_intr(bp) != 0) {
5702 buf[4] = 1;
5703 etest->flags |= ETH_TEST_FL_FAILED;
5704 }
5705
5706 if (bnx2_test_link(bp) != 0) {
5707 buf[5] = 1;
5708 etest->flags |= ETH_TEST_FL_FAILED;
5709
5710 }
5711}
5712
5713static void
5714bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5715{
5716 switch (stringset) {
5717 case ETH_SS_STATS:
5718 memcpy(buf, bnx2_stats_str_arr,
5719 sizeof(bnx2_stats_str_arr));
5720 break;
5721 case ETH_SS_TEST:
5722 memcpy(buf, bnx2_tests_str_arr,
5723 sizeof(bnx2_tests_str_arr));
5724 break;
5725 }
5726}
5727
5728static int
5729bnx2_get_stats_count(struct net_device *dev)
5730{
5731 return BNX2_NUM_STATS;
5732}
5733
5734static void
5735bnx2_get_ethtool_stats(struct net_device *dev,
5736 struct ethtool_stats *stats, u64 *buf)
5737{
Michael Chan972ec0d2006-01-23 16:12:43 -08005738 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005739 int i;
5740 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005741 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005742
5743 if (hw_stats == NULL) {
5744 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5745 return;
5746 }
5747
Michael Chan5b0c76a2005-11-04 08:45:49 -08005748 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5749 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5750 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5751 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005752 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005753 else
5754 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005755
5756 for (i = 0; i < BNX2_NUM_STATS; i++) {
5757 if (stats_len_arr[i] == 0) {
5758 /* skip this counter */
5759 buf[i] = 0;
5760 continue;
5761 }
5762 if (stats_len_arr[i] == 4) {
5763 /* 4-byte counter */
5764 buf[i] = (u64)
5765 *(hw_stats + bnx2_stats_offset_arr[i]);
5766 continue;
5767 }
5768 /* 8-byte counter */
5769 buf[i] = (((u64) *(hw_stats +
5770 bnx2_stats_offset_arr[i])) << 32) +
5771 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5772 }
5773}
5774
5775static int
5776bnx2_phys_id(struct net_device *dev, u32 data)
5777{
Michael Chan972ec0d2006-01-23 16:12:43 -08005778 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005779 int i;
5780 u32 save;
5781
5782 if (data == 0)
5783 data = 2;
5784
5785 save = REG_RD(bp, BNX2_MISC_CFG);
5786 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5787
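	/* Blink for roughly 'data' seconds: alternate every 500 ms between
	 * the bare LED override and all LED overrides forced on.
	 */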
5788 for (i = 0; i < (data * 2); i++) {
5789 if ((i % 2) == 0) {
5790 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5791 }
5792 else {
5793 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5794 BNX2_EMAC_LED_1000MB_OVERRIDE |
5795 BNX2_EMAC_LED_100MB_OVERRIDE |
5796 BNX2_EMAC_LED_10MB_OVERRIDE |
5797 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5798 BNX2_EMAC_LED_TRAFFIC);
5799 }
5800 msleep_interruptible(500);
5801 if (signal_pending(current))
5802 break;
5803 }
5804 REG_WR(bp, BNX2_EMAC_LED, 0);
5805 REG_WR(bp, BNX2_MISC_CFG, save);
5806 return 0;
5807}
5808
Michael Chan4666f872007-05-03 13:22:28 -07005809static int
5810bnx2_set_tx_csum(struct net_device *dev, u32 data)
5811{
5812 struct bnx2 *bp = netdev_priv(dev);
5813
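	/* Mirror the feature setup in bnx2_init_one(): the 5709 is registered
	 * with NETIF_F_HW_CSUM, older chips with NETIF_F_IP_CSUM, so use the
	 * matching ethtool helper here.
	 */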
5814 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5815 return (ethtool_op_set_tx_hw_csum(dev, data));
5816 else
5817 return (ethtool_op_set_tx_csum(dev, data));
5818}
5819
Jeff Garzik7282d492006-09-13 14:30:00 -04005820static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005821 .get_settings = bnx2_get_settings,
5822 .set_settings = bnx2_set_settings,
5823 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005824 .get_regs_len = bnx2_get_regs_len,
5825 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005826 .get_wol = bnx2_get_wol,
5827 .set_wol = bnx2_set_wol,
5828 .nway_reset = bnx2_nway_reset,
5829 .get_link = ethtool_op_get_link,
5830 .get_eeprom_len = bnx2_get_eeprom_len,
5831 .get_eeprom = bnx2_get_eeprom,
5832 .set_eeprom = bnx2_set_eeprom,
5833 .get_coalesce = bnx2_get_coalesce,
5834 .set_coalesce = bnx2_set_coalesce,
5835 .get_ringparam = bnx2_get_ringparam,
5836 .set_ringparam = bnx2_set_ringparam,
5837 .get_pauseparam = bnx2_get_pauseparam,
5838 .set_pauseparam = bnx2_set_pauseparam,
5839 .get_rx_csum = bnx2_get_rx_csum,
5840 .set_rx_csum = bnx2_set_rx_csum,
5841 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005842 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005843 .get_sg = ethtool_op_get_sg,
5844 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005845 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005846 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005847 .self_test_count = bnx2_self_test_count,
5848 .self_test = bnx2_self_test,
5849 .get_strings = bnx2_get_strings,
5850 .phys_id = bnx2_phys_id,
5851 .get_stats_count = bnx2_get_stats_count,
5852 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005853 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005854};
5855
5856/* Called with rtnl_lock */
5857static int
5858bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5859{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005860 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005861 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005862 int err;
5863
5864 switch(cmd) {
5865 case SIOCGMIIPHY:
5866 data->phy_id = bp->phy_addr;
5867
5868 /* fallthru */
5869 case SIOCGMIIREG: {
5870 u32 mii_regval;
5871
Michael Chandad3e452007-05-03 13:18:03 -07005872 if (!netif_running(dev))
5873 return -EAGAIN;
5874
Michael Chanc770a652005-08-25 15:38:39 -07005875 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005876 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005877 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005878
5879 data->val_out = mii_regval;
5880
5881 return err;
5882 }
5883
5884 case SIOCSMIIREG:
5885 if (!capable(CAP_NET_ADMIN))
5886 return -EPERM;
5887
Michael Chandad3e452007-05-03 13:18:03 -07005888 if (!netif_running(dev))
5889 return -EAGAIN;
5890
Michael Chanc770a652005-08-25 15:38:39 -07005891 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005892 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005893 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005894
5895 return err;
5896
5897 default:
5898 /* do nothing */
5899 break;
5900 }
5901 return -EOPNOTSUPP;
5902}
5903
5904/* Called with rtnl_lock */
5905static int
5906bnx2_change_mac_addr(struct net_device *dev, void *p)
5907{
5908 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005909 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005910
Michael Chan73eef4c2005-08-25 15:39:15 -07005911 if (!is_valid_ether_addr(addr->sa_data))
5912 return -EINVAL;
5913
Michael Chanb6016b72005-05-26 13:03:09 -07005914 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5915 if (netif_running(dev))
5916 bnx2_set_mac_addr(bp);
5917
5918 return 0;
5919}
5920
5921/* Called with rtnl_lock */
5922static int
5923bnx2_change_mtu(struct net_device *dev, int new_mtu)
5924{
Michael Chan972ec0d2006-01-23 16:12:43 -08005925 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005926
5927 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5928 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5929 return -EINVAL;
5930
5931 dev->mtu = new_mtu;
5932 if (netif_running(dev)) {
5933 bnx2_netif_stop(bp);
5934
5935 bnx2_init_nic(bp);
5936
5937 bnx2_netif_start(bp);
5938 }
5939 return 0;
5940}
5941
5942#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5943static void
5944poll_bnx2(struct net_device *dev)
5945{
Michael Chan972ec0d2006-01-23 16:12:43 -08005946 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005947
5948 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005949 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005950 enable_irq(bp->pdev->irq);
5951}
5952#endif
5953
Michael Chan253c8b72007-01-08 19:56:01 -08005954static void __devinit
5955bnx2_get_5709_media(struct bnx2 *bp)
5956{
5957 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5958 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5959 u32 strap;
5960
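	/* The dual media bond ID identifies fixed copper ('C') and fixed
	 * SerDes ('S') parts directly; for the rest, the media type comes
	 * from the strap value decoded below according to the PCI function.
	 */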
5961 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5962 return;
5963 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5964 bp->phy_flags |= PHY_SERDES_FLAG;
5965 return;
5966 }
5967
5968 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5969 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5970 else
5971 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5972
5973 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5974 switch (strap) {
5975 case 0x4:
5976 case 0x5:
5977 case 0x6:
5978 bp->phy_flags |= PHY_SERDES_FLAG;
5979 return;
5980 }
5981 } else {
5982 switch (strap) {
5983 case 0x1:
5984 case 0x2:
5985 case 0x4:
5986 bp->phy_flags |= PHY_SERDES_FLAG;
5987 return;
5988 }
5989 }
5990}
5991
Michael Chanb6016b72005-05-26 13:03:09 -07005992static int __devinit
5993bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5994{
5995 struct bnx2 *bp;
5996 unsigned long mem_len;
5997 int rc;
5998 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07005999 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006000
6001 SET_MODULE_OWNER(dev);
6002 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006003 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006004
6005 bp->flags = 0;
6006 bp->phy_flags = 0;
6007
6008 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6009 rc = pci_enable_device(pdev);
6010 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006011		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006012 goto err_out;
6013 }
6014
6015 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006016 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006017 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006018 rc = -ENODEV;
6019 goto err_out_disable;
6020 }
6021
6022 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6023 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006024 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006025 goto err_out_disable;
6026 }
6027
6028 pci_set_master(pdev);
6029
6030 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6031 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006032 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006033 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006034 rc = -EIO;
6035 goto err_out_release;
6036 }
6037
Michael Chanb6016b72005-05-26 13:03:09 -07006038 bp->dev = dev;
6039 bp->pdev = pdev;
6040
6041 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00006042 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006043
6044 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006045 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006046 dev->mem_end = dev->mem_start + mem_len;
6047 dev->irq = pdev->irq;
6048
6049 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6050
6051 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006052 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006053 rc = -ENOMEM;
6054 goto err_out_release;
6055 }
6056
 6057	/* Configure byte swap and enable write to the reg_window registers.
 6058	 * Rely on the CPU to do target byte swapping on big endian systems.
 6059	 * The chip's target access swapping will not swap all accesses.
 6060	 */
6061 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6062 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6063 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6064
Pavel Machek829ca9a2005-09-03 15:56:56 -07006065 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006066
6067 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6068
Michael Chan59b47d82006-11-19 14:10:45 -08006069 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6070 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6071 if (bp->pcix_cap == 0) {
6072 dev_err(&pdev->dev,
6073 "Cannot find PCIX capability, aborting.\n");
6074 rc = -EIO;
6075 goto err_out_unmap;
6076 }
6077 }
6078
Michael Chan40453c82007-05-03 13:19:18 -07006079 /* 5708 cannot support DMA addresses > 40-bit. */
6080 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6081 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6082 else
6083 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6084
6085 /* Configure DMA attributes. */
6086 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6087 dev->features |= NETIF_F_HIGHDMA;
6088 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6089 if (rc) {
6090 dev_err(&pdev->dev,
6091 "pci_set_consistent_dma_mask failed, aborting.\n");
6092 goto err_out_unmap;
6093 }
6094 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6095 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6096 goto err_out_unmap;
6097 }
6098
Michael Chanb6016b72005-05-26 13:03:09 -07006099 /* Get bus information. */
6100 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6101 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6102 u32 clkreg;
6103
6104 bp->flags |= PCIX_FLAG;
6105
6106 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006107
Michael Chanb6016b72005-05-26 13:03:09 -07006108 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6109 switch (clkreg) {
6110 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6111 bp->bus_speed_mhz = 133;
6112 break;
6113
6114 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6115 bp->bus_speed_mhz = 100;
6116 break;
6117
6118 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6119 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6120 bp->bus_speed_mhz = 66;
6121 break;
6122
6123 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6124 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6125 bp->bus_speed_mhz = 50;
6126 break;
6127
6128 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6129 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6130 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6131 bp->bus_speed_mhz = 33;
6132 break;
6133 }
6134 }
6135 else {
6136 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6137 bp->bus_speed_mhz = 66;
6138 else
6139 bp->bus_speed_mhz = 33;
6140 }
6141
6142 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6143 bp->flags |= PCI_32BIT_FLAG;
6144
6145 /* 5706A0 may falsely detect SERR and PERR. */
6146 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6147 reg = REG_RD(bp, PCI_COMMAND);
6148 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6149 REG_WR(bp, PCI_COMMAND, reg);
6150 }
6151 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6152 !(bp->flags & PCIX_FLAG)) {
6153
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006154 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006155 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006156		goto err_out_unmap;
6157 }
6158
6159 bnx2_init_nvram(bp);
6160
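	/* Locate the firmware shared memory window.  Newer bootcode exposes a
	 * per-PCI-function base address behind the SHM header signature;
	 * otherwise fall back to the fixed HOST_VIEW_SHMEM_BASE address.
	 */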
Michael Chane3648b32005-11-04 08:51:21 -08006161 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6162
6163 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006164 BNX2_SHM_HDR_SIGNATURE_SIG) {
6165 u32 off = PCI_FUNC(pdev->devfn) << 2;
6166
6167 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6168 } else
Michael Chane3648b32005-11-04 08:51:21 -08006169 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6170
Michael Chanb6016b72005-05-26 13:03:09 -07006171 /* Get the permanent MAC address. First we need to make sure the
6172 * firmware is actually running.
6173 */
Michael Chane3648b32005-11-04 08:51:21 -08006174 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006175
6176 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6177 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006178 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006179 rc = -ENODEV;
6180 goto err_out_unmap;
6181 }
6182
Michael Chane3648b32005-11-04 08:51:21 -08006183 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006184
Michael Chane3648b32005-11-04 08:51:21 -08006185 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006186 bp->mac_addr[0] = (u8) (reg >> 8);
6187 bp->mac_addr[1] = (u8) reg;
6188
Michael Chane3648b32005-11-04 08:51:21 -08006189 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006190 bp->mac_addr[2] = (u8) (reg >> 24);
6191 bp->mac_addr[3] = (u8) (reg >> 16);
6192 bp->mac_addr[4] = (u8) (reg >> 8);
6193 bp->mac_addr[5] = (u8) reg;
6194
6195 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006196 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006197
6198 bp->rx_csum = 1;
6199
6200 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6201
6202 bp->tx_quick_cons_trip_int = 20;
6203 bp->tx_quick_cons_trip = 20;
6204 bp->tx_ticks_int = 80;
6205 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006206
Michael Chanb6016b72005-05-26 13:03:09 -07006207 bp->rx_quick_cons_trip_int = 6;
6208 bp->rx_quick_cons_trip = 6;
6209 bp->rx_ticks_int = 18;
6210 bp->rx_ticks = 18;
6211
6212 bp->stats_ticks = 1000000 & 0xffff00;
6213
6214 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006215 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006216
Michael Chan5b0c76a2005-11-04 08:45:49 -08006217 bp->phy_addr = 1;
6218
Michael Chanb6016b72005-05-26 13:03:09 -07006219 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006220 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6221 bnx2_get_5709_media(bp);
6222 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006223 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006224
6225 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006226 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006227 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006228 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006229 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006230 BNX2_SHARED_HW_CFG_CONFIG);
6231 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6232 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6233 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006234 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6235 CHIP_NUM(bp) == CHIP_NUM_5708)
6236 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006237 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6238 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006239
Michael Chan16088272006-06-12 22:16:43 -07006240 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6241 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6242 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006243 bp->flags |= NO_WOL_FLAG;
6244
Michael Chanb6016b72005-05-26 13:03:09 -07006245 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6246 bp->tx_quick_cons_trip_int =
6247 bp->tx_quick_cons_trip;
6248 bp->tx_ticks_int = bp->tx_ticks;
6249 bp->rx_quick_cons_trip_int =
6250 bp->rx_quick_cons_trip;
6251 bp->rx_ticks_int = bp->rx_ticks;
6252 bp->comp_prod_trip_int = bp->comp_prod_trip;
6253 bp->com_ticks_int = bp->com_ticks;
6254 bp->cmd_ticks_int = bp->cmd_ticks;
6255 }
6256
Michael Chanf9317a42006-09-29 17:06:23 -07006257 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6258 *
6259 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6260 * with byte enables disabled on the unused 32-bit word. This is legal
6261 * but causes problems on the AMD 8132 which will eventually stop
6262 * responding after a while.
6263 *
6264 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006265 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006266 */
6267 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6268 struct pci_dev *amd_8132 = NULL;
6269
6270 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6271 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6272 amd_8132))) {
6273 u8 rev;
6274
6275 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6276 if (rev >= 0x10 && rev <= 0x13) {
6277 disable_msi = 1;
6278 pci_dev_put(amd_8132);
6279 break;
6280 }
6281 }
6282 }
6283
Michael Chanb6016b72005-05-26 13:03:09 -07006284 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6285 bp->req_line_speed = 0;
6286 if (bp->phy_flags & PHY_SERDES_FLAG) {
6287 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006288
Michael Chane3648b32005-11-04 08:51:21 -08006289 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006290 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6291 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6292 bp->autoneg = 0;
6293 bp->req_line_speed = bp->line_speed = SPEED_1000;
6294 bp->req_duplex = DUPLEX_FULL;
6295 }
Michael Chanb6016b72005-05-26 13:03:09 -07006296 }
6297 else {
6298 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6299 }
6300
6301 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6302
Michael Chancd339a02005-08-25 15:35:24 -07006303 init_timer(&bp->timer);
6304 bp->timer.expires = RUN_AT(bp->timer_interval);
6305 bp->timer.data = (unsigned long) bp;
6306 bp->timer.function = bnx2_timer;
6307
Michael Chanb6016b72005-05-26 13:03:09 -07006308 return 0;
6309
6310err_out_unmap:
6311 if (bp->regview) {
6312 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006313 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006314 }
6315
6316err_out_release:
6317 pci_release_regions(pdev);
6318
6319err_out_disable:
6320 pci_disable_device(pdev);
6321 pci_set_drvdata(pdev, NULL);
6322
6323err_out:
6324 return rc;
6325}
6326
6327static int __devinit
6328bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6329{
6330 static int version_printed = 0;
6331 struct net_device *dev = NULL;
6332 struct bnx2 *bp;
6333 int rc, i;
6334
6335 if (version_printed++ == 0)
6336 printk(KERN_INFO "%s", version);
6337
6338 /* dev zeroed in init_etherdev */
6339 dev = alloc_etherdev(sizeof(*bp));
6340
6341 if (!dev)
6342 return -ENOMEM;
6343
6344 rc = bnx2_init_board(pdev, dev);
6345 if (rc < 0) {
6346 free_netdev(dev);
6347 return rc;
6348 }
6349
6350 dev->open = bnx2_open;
6351 dev->hard_start_xmit = bnx2_start_xmit;
6352 dev->stop = bnx2_close;
6353 dev->get_stats = bnx2_get_stats;
6354 dev->set_multicast_list = bnx2_set_rx_mode;
6355 dev->do_ioctl = bnx2_ioctl;
6356 dev->set_mac_address = bnx2_change_mac_addr;
6357 dev->change_mtu = bnx2_change_mtu;
6358 dev->tx_timeout = bnx2_tx_timeout;
6359 dev->watchdog_timeo = TX_TIMEOUT;
6360#ifdef BCM_VLAN
6361 dev->vlan_rx_register = bnx2_vlan_rx_register;
6362 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6363#endif
6364 dev->poll = bnx2_poll;
6365 dev->ethtool_ops = &bnx2_ethtool_ops;
6366 dev->weight = 64;
6367
Michael Chan972ec0d2006-01-23 16:12:43 -08006368 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006369
6370#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6371 dev->poll_controller = poll_bnx2;
6372#endif
6373
Michael Chan1b2f9222007-05-03 13:20:19 -07006374 pci_set_drvdata(pdev, dev);
6375
6376 memcpy(dev->dev_addr, bp->mac_addr, 6);
6377 memcpy(dev->perm_addr, bp->mac_addr, 6);
6378 bp->name = board_info[ent->driver_data].name;
6379
Michael Chan4666f872007-05-03 13:22:28 -07006380 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6381 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6382 else
6383 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan1b2f9222007-05-03 13:20:19 -07006384#ifdef BCM_VLAN
6385 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6386#endif
6387 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006388 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6389 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006390
Michael Chanb6016b72005-05-26 13:03:09 -07006391 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006392 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006393 if (bp->regview)
6394 iounmap(bp->regview);
6395 pci_release_regions(pdev);
6396 pci_disable_device(pdev);
6397 pci_set_drvdata(pdev, NULL);
6398 free_netdev(dev);
6399 return rc;
6400 }
6401
Michael Chanb6016b72005-05-26 13:03:09 -07006402 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6403 "IRQ %d, ",
6404 dev->name,
6405 bp->name,
6406 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6407 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6408 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6409 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6410 bp->bus_speed_mhz,
6411 dev->base_addr,
6412 bp->pdev->irq);
6413
6414 printk("node addr ");
6415 for (i = 0; i < 6; i++)
6416 printk("%2.2x", dev->dev_addr[i]);
6417 printk("\n");
6418
Michael Chanb6016b72005-05-26 13:03:09 -07006419 return 0;
6420}
6421
6422static void __devexit
6423bnx2_remove_one(struct pci_dev *pdev)
6424{
6425 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006426 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006427
Michael Chanafdc08b2005-08-25 15:34:29 -07006428 flush_scheduled_work();
6429
Michael Chanb6016b72005-05-26 13:03:09 -07006430 unregister_netdev(dev);
6431
6432 if (bp->regview)
6433 iounmap(bp->regview);
6434
6435 free_netdev(dev);
6436 pci_release_regions(pdev);
6437 pci_disable_device(pdev);
6438 pci_set_drvdata(pdev, NULL);
6439}
6440
6441static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006442bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006443{
6444 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006445 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006446 u32 reset_code;
6447
6448 if (!netif_running(dev))
6449 return 0;
6450
Michael Chan1d60290f2006-03-20 17:50:08 -08006451 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006452 bnx2_netif_stop(bp);
6453 netif_device_detach(dev);
6454 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006455 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006456 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006457 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006458 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6459 else
6460 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6461 bnx2_reset_chip(bp, reset_code);
6462 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006463 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006464 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006465 return 0;
6466}
6467
6468static int
6469bnx2_resume(struct pci_dev *pdev)
6470{
6471 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006472 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006473
6474 if (!netif_running(dev))
6475 return 0;
6476
Michael Chan30c517b2007-05-03 13:20:40 -07006477 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006478 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006479 netif_device_attach(dev);
6480 bnx2_init_nic(bp);
6481 bnx2_netif_start(bp);
6482 return 0;
6483}
6484
6485static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006486 .name = DRV_MODULE_NAME,
6487 .id_table = bnx2_pci_tbl,
6488 .probe = bnx2_init_one,
6489 .remove = __devexit_p(bnx2_remove_one),
6490 .suspend = bnx2_suspend,
6491 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006492};
6493
6494static int __init bnx2_init(void)
6495{
Jeff Garzik29917622006-08-19 17:48:59 -04006496 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006497}
6498
6499static void __exit bnx2_cleanup(void)
6500{
6501 pci_unregister_driver(&bnx2_pci_driver);
6502}
6503
6504module_init(bnx2_init);
6505module_exit(bnx2_cleanup);
6506
6507
6508