/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.8"
#define DRV_MODULE_RELDATE      "April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };

static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};

static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

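/* Number of free TX descriptors.  tx_prod and tx_cons are free-running
 * 16-bit indices; see the note below about the one unusable ring entry.
 */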
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = bp->tx_prod - bp->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}

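/* Indirect register access through the PCICFG register window.  The
 * window address write and the data access must stay paired, so both
 * are done under indirect_lock.
 */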
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}

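/* Write a value into context memory for the given context ID.  On the
 * 5709 the write goes through CTX_CTX_DATA/CTX_CTX_CTRL and is polled
 * for completion; older chips use the CTX_DATA_ADR/CTX_DATA pair.
 */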
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        u32 val;
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}

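/* MII/MDIO access helpers.  If the MDIO block is auto-polling the PHY,
 * auto-poll is turned off around the transaction and restored afterwards.
 * Both helpers poll START_BUSY and return -EBUSY on timeout.
 */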
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}

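/* Quiesce the interface: mask chip interrupts and wait for any running
 * handler, then stop polling and the TX queue.  bnx2_netif_start()
 * reverses this once intr_sem drops back to zero.
 */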
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}

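/* Allocate the TX/RX rings, the combined status + statistics block and,
 * on the 5709, the host context pages.  On any failure everything is
 * released through bnx2_free_mem().
 */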
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}

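/* Report the current link speed, duplex and autoneg state to the on-chip
 * firmware via the BNX2_LINK_STATUS word in shared memory.
 */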
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }

        bnx2_report_fw_link(bp);
}

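/* Resolve TX/RX flow control from the local and remote pause
 * advertisements (see Table 28B-3 of the 802.3 spec), or fall back to
 * the requested settings when flow control is not autonegotiated.
 */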
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
        case MII_BNX2_GP_TOP_AN_SPEED_10:
                bp->line_speed = SPEED_10;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_100:
                bp->line_speed = SPEED_100;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_1G:
        case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                bp->line_speed = SPEED_1000;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                bp->line_speed = SPEED_2500;
                break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
        u32 val;

        bp->link_up = 1;
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
        case BCM5708S_1000X_STAT1_SPEED_10:
                bp->line_speed = SPEED_10;
                break;
        case BCM5708S_1000X_STAT1_SPEED_100:
                bp->line_speed = SPEED_100;
                break;
        case BCM5708S_1000X_STAT1_SPEED_1G:
                bp->line_speed = SPEED_1000;
                break;
        case BCM5708S_1000X_STAT1_SPEED_2G5:
                bp->line_speed = SPEED_2500;
                break;
        }
        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
        u32 bmcr, local_adv, remote_adv, common;

        bp->link_up = 1;
        bp->line_speed = SPEED_1000;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_FULLDPLX) {
                bp->duplex = DUPLEX_FULL;
        }
        else {
                bp->duplex = DUPLEX_HALF;
        }

        if (!(bmcr & BMCR_ANENABLE)) {
                return 0;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        common = local_adv & remote_adv;
        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

                if (common & ADVERTISE_1000XFULL) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}

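/* Program the EMAC for the resolved link parameters: port mode for the
 * current speed, duplex, and the RX/TX PAUSE enables, then acknowledge
 * the link-change interrupt.
 */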
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
            (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                 BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                case SPEED_10:
                        if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                break;
                        }
                        /* fall through */
                case SPEED_100:
                        val |= BNX2_EMAC_MODE_PORT_MII;
                        break;
                case SPEED_2500:
                        val |= BNX2_EMAC_MODE_25G_MODE;
                        /* fall through */
                case SPEED_1000:
                        val |= BNX2_EMAC_MODE_PORT_GMII;
                        break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}

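/* On 5709 SerDes the link status register lives in the GP_STATUS block,
 * so the block address is switched before, and restored after, reading
 * mii_bmsr1.
 */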
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5709))
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5709))
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 1;

        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                return 0;

        if (bp->autoneg & AUTONEG_SPEED)
                bp->advertising |= ADVERTISED_2500baseX_Full;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (!(up1 & BCM5708S_UP1_2G5)) {
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 0;
        }

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 0;

        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                return 0;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (up1 & BCM5708S_UP1_2G5) {
                up1 &= ~BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 1;
        }

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr |= BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

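/* Re-read the PHY link status and update the driver's link state, calling
 * the chip-specific link-up handler and reprogramming the MAC to match.
 */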
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
        int i;
        u32 reg;

        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
        for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
                udelay(10);

                bnx2_read_phy(bp, bp->mii_bmcr, &reg);
                if (!(reg & BMCR_RESET)) {
                        udelay(20);
                        break;
                }
        }
        if (i == PHY_RESET_MAX_WAIT) {
                return -EBUSY;
        }
        return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
        u32 adv = 0;

        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
            (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPAUSE;
                }
                else {
                        adv = ADVERTISE_PAUSE_CAP;
                }
        }
        else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPSE_ASYM;
                }
                else {
                        adv = ADVERTISE_PAUSE_ASYM;
                }
        }
        else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
                }
                else {
                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
                }
        }
        return adv;
}

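/* Configure the SerDes PHY either for a forced speed/duplex or for
 * autonegotiation with the currently advertised modes, forcing a visible
 * link transition when the settings change.
 */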
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                u32 new_bmcr;
                int force_link_down = 0;

                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate, which is very common
                 * in blade servers.  Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions.  Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED \
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
        ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                            ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                if ((adv1000_reg != new_adv1000_reg) ||
                    (adv_reg != new_adv_reg) ||
                    ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
        if (bp->loopback == MAC_LOOPBACK)
                return 0;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                return (bnx2_setup_serdes_phy(bp));
        }
        else {
                return (bnx2_setup_copper_phy(bp));
        }
}

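/* Chip-specific PHY initialization.  The 5709 SerDes PHY places the
 * standard MII registers at an offset of 0x10, so bp->mii_* are
 * redirected before the individual register blocks are programmed.
 */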
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
        u32 val;

        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
        bnx2_reset_phy(bp);

        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}


static int
bnx2_init_phy(struct bnx2 *bp)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
        bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp);
        }
        else {
                rc = bnx2_init_copper_phy(bp);
        }

        bnx2_setup_phy(bp);

        return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
        u32 mac_mode;

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~BNX2_EMAC_MODE_PORT;
        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}

Michael Chanb6016b72005-05-26 13:03:09 -07001729static int
Michael Chanb090ae22006-01-23 16:07:10 -08001730bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001731{
1732 int i;
1733 u32 val;
1734
Michael Chanb6016b72005-05-26 13:03:09 -07001735 bp->fw_wr_seq++;
1736 msg_data |= bp->fw_wr_seq;
1737
Michael Chane3648b32005-11-04 08:51:21 -08001738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001739
1740 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1742 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001743
Michael Chane3648b32005-11-04 08:51:21 -08001744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001745
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1747 break;
1748 }
Michael Chanb090ae22006-01-23 16:07:10 -08001749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1750 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001751
1752 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1754 if (!silent)
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1756 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001757
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1760
Michael Chane3648b32005-11-04 08:51:21 -08001761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001762
Michael Chanb6016b72005-05-26 13:03:09 -07001763 return -EBUSY;
1764 }
1765
Michael Chanb090ae22006-01-23 16:07:10 -08001766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1767 return -EIO;
1768
Michael Chanb6016b72005-05-26 13:03:09 -07001769 return 0;
1770}
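/* Usage note (illustrative comment, not driver code): callers combine a
 * DATA/WAIT code with a message code and let bnx2_fw_sync() handle the
 * sequence numbering and ACK polling, e.g. as in the power-state path
 * further down:
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
 *
 * A non-zero return means the firmware either never acknowledged the
 * sequence number (-EBUSY) or acknowledged with a non-OK status (-EIO).
 */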
1771
Michael Chan59b47d82006-11-19 14:10:45 -08001772static int
1773bnx2_init_5709_context(struct bnx2 *bp)
1774{
1775 int i, ret = 0;
1776 u32 val;
1777
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < bp->ctx_pages; i++) {
1782 int j;
1783
1784 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788 (u64) bp->ctx_blk_mapping[i] >> 32);
1789 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791 for (j = 0; j < 10; j++) {
1792
1793 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1795 break;
1796 udelay(5);
1797 }
1798 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1799 ret = -EBUSY;
1800 break;
1801 }
1802 }
1803 return ret;
1804}
1805
Michael Chanb6016b72005-05-26 13:03:09 -07001806static void
1807bnx2_init_context(struct bnx2 *bp)
1808{
1809 u32 vcid;
1810
1811 vcid = 96;
1812 while (vcid) {
1813 u32 vcid_addr, pcid_addr, offset;
1814
1815 vcid--;
1816
1817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1818 u32 new_vcid;
1819
1820 vcid_addr = GET_PCID_ADDR(vcid);
1821 if (vcid & 0x8) {
1822 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1823 }
1824 else {
1825 new_vcid = vcid;
1826 }
1827 pcid_addr = GET_PCID_ADDR(new_vcid);
1828 }
1829 else {
1830 vcid_addr = GET_CID_ADDR(vcid);
1831 pcid_addr = vcid_addr;
1832 }
1833
1834 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1836
1837 /* Zero out the context. */
1838 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839 CTX_WR(bp, 0x00, offset, 0);
1840 }
1841
1842 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1844 }
1845}
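/* Worked example (illustrative comment): on 5706 A0 a VCID with bit 3 set
 * is remapped before the physical CID address is computed, e.g.
 * vcid 0x1a -> 0x60 + (0x1a & 0xf0) + (0x1a & 0x7) = 0x72, while
 * vcid 0x13 (bit 3 clear) maps to itself.  All other chips use
 * GET_CID_ADDR(vcid) directly, as shown above.
 */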
1846
1847static int
1848bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849{
1850 u16 *good_mbuf;
1851 u32 good_mbuf_cnt;
1852 u32 val;
1853
1854 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855 if (good_mbuf == NULL) {
1856 printk(KERN_ERR PFX "Failed to allocate memory in "
1857 "bnx2_alloc_bad_rbuf\n");
1858 return -ENOMEM;
1859 }
1860
1861 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864 good_mbuf_cnt = 0;
1865
1866 /* Allocate a bunch of mbufs and save the good ones in an array. */
1867 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875 /* The addresses with Bit 9 set are bad memory blocks. */
1876 if (!(val & (1 << 9))) {
1877 good_mbuf[good_mbuf_cnt] = (u16) val;
1878 good_mbuf_cnt++;
1879 }
1880
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 }
1883
1884 /* Free the good ones back to the mbuf pool thus discarding
1885 * all the bad ones. */
1886 while (good_mbuf_cnt) {
1887 good_mbuf_cnt--;
1888
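		/* Pack the mbuf handle into both the upper (bits 9 and up)
		 * and lower fields of the free command and set bit 0; this
		 * appears to be the layout BNX2_RBUF_FW_BUF_FREE expects.
		 */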
1889 val = good_mbuf[good_mbuf_cnt];
1890 val = (val << 9) | val | 1;
1891
1892 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893 }
1894 kfree(good_mbuf);
1895 return 0;
1896}
1897
1898static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001899bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001900{
1901 u32 val;
1902 u8 *mac_addr = bp->dev->dev_addr;
1903
1904 val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001909 (mac_addr[4] << 8) | mac_addr[5];
1910
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912}
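/* Illustrative example (comment only): for a MAC address of
 * 00:10:18:aa:bb:cc the function above programs
 * BNX2_EMAC_MAC_MATCH0 = 0x00000010 (bytes 0-1) and
 * BNX2_EMAC_MAC_MATCH1 = 0x18aabbcc (bytes 2-5).
 */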
1913
1914static inline int
1915bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916{
1917 struct sk_buff *skb;
1918 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001920 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001921 unsigned long align;
1922
Michael Chan932f3772006-08-15 01:39:36 -07001923 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001924 if (skb == NULL) {
1925 return -ENOMEM;
1926 }
1927
Michael Chan59b47d82006-11-19 14:10:45 -08001928 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001930
Michael Chanb6016b72005-05-26 13:03:09 -07001931 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932 PCI_DMA_FROMDEVICE);
1933
1934 rx_buf->skb = skb;
1935 pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940 bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942 return 0;
1943}
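/* Note on the alignment above (illustrative comment, assuming BNX2_RX_ALIGN
 * is a power of two such as 16): if skb->data happens to end in 0x6, then
 * align = 0x6 and skb_reserve(skb, 16 - 6) advances the data pointer to the
 * next aligned boundary before the buffer is DMA-mapped.
 */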
1944
1945static void
1946bnx2_phy_int(struct bnx2 *bp)
1947{
1948 u32 new_link_state, old_link_state;
1949
1950 new_link_state = bp->status_blk->status_attn_bits &
1951 STATUS_ATTN_BITS_LINK_STATE;
1952 old_link_state = bp->status_blk->status_attn_bits_ack &
1953 STATUS_ATTN_BITS_LINK_STATE;
1954 if (new_link_state != old_link_state) {
1955 if (new_link_state) {
1956 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1957 STATUS_ATTN_BITS_LINK_STATE);
1958 }
1959 else {
1960 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1961 STATUS_ATTN_BITS_LINK_STATE);
1962 }
1963 bnx2_set_link(bp);
1964 }
1965}
1966
1967static void
1968bnx2_tx_int(struct bnx2 *bp)
1969{
Michael Chanf4e418f2005-11-04 08:53:48 -08001970 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001971 u16 hw_cons, sw_cons, sw_ring_cons;
1972 int tx_free_bd = 0;
1973
Michael Chanf4e418f2005-11-04 08:53:48 -08001974 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001975 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1976 hw_cons++;
1977 }
1978 sw_cons = bp->tx_cons;
1979
1980 while (sw_cons != hw_cons) {
1981 struct sw_bd *tx_buf;
1982 struct sk_buff *skb;
1983 int i, last;
1984
1985 sw_ring_cons = TX_RING_IDX(sw_cons);
1986
1987 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1988 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01001989
Michael Chanb6016b72005-05-26 13:03:09 -07001990 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07001991 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001992 u16 last_idx, last_ring_idx;
1993
1994 last_idx = sw_cons +
1995 skb_shinfo(skb)->nr_frags + 1;
1996 last_ring_idx = sw_ring_cons +
1997 skb_shinfo(skb)->nr_frags + 1;
1998 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1999 last_idx++;
2000 }
2001 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2002 break;
2003 }
2004 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002005
Michael Chanb6016b72005-05-26 13:03:09 -07002006 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2007 skb_headlen(skb), PCI_DMA_TODEVICE);
2008
2009 tx_buf->skb = NULL;
2010 last = skb_shinfo(skb)->nr_frags;
2011
2012 for (i = 0; i < last; i++) {
2013 sw_cons = NEXT_TX_BD(sw_cons);
2014
2015 pci_unmap_page(bp->pdev,
2016 pci_unmap_addr(
2017 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2018 mapping),
2019 skb_shinfo(skb)->frags[i].size,
2020 PCI_DMA_TODEVICE);
2021 }
2022
2023 sw_cons = NEXT_TX_BD(sw_cons);
2024
2025 tx_free_bd += last + 1;
2026
Michael Chan745720e2006-06-29 12:37:41 -07002027 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002028
Michael Chanf4e418f2005-11-04 08:53:48 -08002029 hw_cons = bp->hw_tx_cons =
2030 sblk->status_tx_quick_consumer_index0;
2031
Michael Chanb6016b72005-05-26 13:03:09 -07002032 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2033 hw_cons++;
2034 }
2035 }
2036
Michael Chane89bbf12005-08-25 15:36:58 -07002037 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002038 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2039 * before checking for netif_queue_stopped(). Without the
2040 * memory barrier, there is a small possibility that bnx2_start_xmit()
2041 * will miss it and cause the queue to be stopped forever.
2042 */
2043 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002044
Michael Chan2f8af122006-08-15 01:39:10 -07002045 if (unlikely(netif_queue_stopped(bp->dev)) &&
2046 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2047 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002048 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002049 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002050 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002051 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002052 }
Michael Chanb6016b72005-05-26 13:03:09 -07002053}
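/* Note on the partial-TSO check above (illustrative comment): the signed
 * 16-bit subtraction makes the "has the hardware consumed the last BD of
 * this packet?" test safe across ring-index wrap.  For example, with
 * hw_cons = 0xfffe and last_idx = 0x0002, (s16)(0x0002 - 0xfffe) = 4 > 0,
 * so the packet is only partially completed and the loop breaks out and
 * waits for more completions.
 */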
2054
2055static inline void
2056bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2057 u16 cons, u16 prod)
2058{
Michael Chan236b6392006-03-20 17:49:02 -08002059 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2060 struct rx_bd *cons_bd, *prod_bd;
2061
2062 cons_rx_buf = &bp->rx_buf_ring[cons];
2063 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002064
2065 pci_dma_sync_single_for_device(bp->pdev,
2066 pci_unmap_addr(cons_rx_buf, mapping),
2067 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2068
Michael Chan236b6392006-03-20 17:49:02 -08002069 bp->rx_prod_bseq += bp->rx_buf_use_size;
2070
2071 prod_rx_buf->skb = skb;
2072
2073 if (cons == prod)
2074 return;
2075
Michael Chanb6016b72005-05-26 13:03:09 -07002076 pci_unmap_addr_set(prod_rx_buf, mapping,
2077 pci_unmap_addr(cons_rx_buf, mapping));
2078
Michael Chan3fdfcc22006-03-20 17:49:49 -08002079 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2080 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002081 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2082 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002083}
2084
2085static int
2086bnx2_rx_int(struct bnx2 *bp, int budget)
2087{
Michael Chanf4e418f2005-11-04 08:53:48 -08002088 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002089 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2090 struct l2_fhdr *rx_hdr;
2091 int rx_pkt = 0;
2092
Michael Chanf4e418f2005-11-04 08:53:48 -08002093 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002094 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2095 hw_cons++;
2096 }
2097 sw_cons = bp->rx_cons;
2098 sw_prod = bp->rx_prod;
2099
2100 /* Memory barrier necessary as speculative reads of the rx
2101 * buffer can be ahead of the index in the status block
2102 */
2103 rmb();
2104 while (sw_cons != hw_cons) {
2105 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002106 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002107 struct sw_bd *rx_buf;
2108 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002109 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002110
2111 sw_ring_cons = RX_RING_IDX(sw_cons);
2112 sw_ring_prod = RX_RING_IDX(sw_prod);
2113
2114 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2115 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002116
2117 rx_buf->skb = NULL;
2118
2119 dma_addr = pci_unmap_addr(rx_buf, mapping);
2120
2121 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002122 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2123
2124 rx_hdr = (struct l2_fhdr *) skb->data;
2125 len = rx_hdr->l2_fhdr_pkt_len - 4;
2126
Michael Chanade2bfe2006-01-23 16:09:51 -08002127 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002128 (L2_FHDR_ERRORS_BAD_CRC |
2129 L2_FHDR_ERRORS_PHY_DECODE |
2130 L2_FHDR_ERRORS_ALIGNMENT |
2131 L2_FHDR_ERRORS_TOO_SHORT |
2132 L2_FHDR_ERRORS_GIANT_FRAME)) {
2133
2134 goto reuse_rx;
2135 }
2136
2137 /* Since we don't have a jumbo ring, copy small packets
2138 * if mtu > 1500
2139 */
2140 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2141 struct sk_buff *new_skb;
2142
Michael Chan932f3772006-08-15 01:39:36 -07002143 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002144 if (new_skb == NULL)
2145 goto reuse_rx;
2146
2147 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002148 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2149 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002150 skb_reserve(new_skb, 2);
2151 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002152
2153 bnx2_reuse_rx_skb(bp, skb,
2154 sw_ring_cons, sw_ring_prod);
2155
2156 skb = new_skb;
2157 }
2158 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002159 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002160 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2161
2162 skb_reserve(skb, bp->rx_offset);
2163 skb_put(skb, len);
2164 }
2165 else {
2166reuse_rx:
2167 bnx2_reuse_rx_skb(bp, skb,
2168 sw_ring_cons, sw_ring_prod);
2169 goto next_rx;
2170 }
2171
2172 skb->protocol = eth_type_trans(skb, bp->dev);
2173
2174 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002175 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002176
Michael Chan745720e2006-06-29 12:37:41 -07002177 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002178 goto next_rx;
2179
2180 }
2181
Michael Chanb6016b72005-05-26 13:03:09 -07002182 skb->ip_summed = CHECKSUM_NONE;
2183 if (bp->rx_csum &&
2184 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2185 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2186
Michael Chanade2bfe2006-01-23 16:09:51 -08002187 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2188 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002189 skb->ip_summed = CHECKSUM_UNNECESSARY;
2190 }
2191
2192#ifdef BCM_VLAN
2193 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2194 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2195 rx_hdr->l2_fhdr_vlan_tag);
2196 }
2197 else
2198#endif
2199 netif_receive_skb(skb);
2200
2201 bp->dev->last_rx = jiffies;
2202 rx_pkt++;
2203
2204next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002205 sw_cons = NEXT_RX_BD(sw_cons);
2206 sw_prod = NEXT_RX_BD(sw_prod);
2207
2208 if ((rx_pkt == budget))
2209 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002210
2211 /* Refresh hw_cons to see if there is new work */
2212 if (sw_cons == hw_cons) {
2213 hw_cons = bp->hw_rx_cons =
2214 sblk->status_rx_quick_consumer_index0;
2215 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2216 hw_cons++;
2217 rmb();
2218 }
Michael Chanb6016b72005-05-26 13:03:09 -07002219 }
2220 bp->rx_cons = sw_cons;
2221 bp->rx_prod = sw_prod;
2222
2223 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2224
2225 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2226
2227 mmiowb();
2228
2229 return rx_pkt;
2230
2231}
2232
2233/* MSI ISR - The only difference between this and the INTx ISR
2234 * is that the MSI interrupt is always serviced.
2235 */
2236static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002237bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002238{
2239 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002240 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002241
Michael Chanc921e4c2005-09-08 13:15:32 -07002242 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002243 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2244 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2245 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2246
2247 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002248 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2249 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002250
Michael Chan73eef4c2005-08-25 15:39:15 -07002251 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002252
Michael Chan73eef4c2005-08-25 15:39:15 -07002253 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002254}
2255
2256static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002257bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002258{
2259 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002260 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002261
2262 /* When using INTx, it is possible for the interrupt to arrive
2263 * at the CPU before the status block posted prior to the
2264 * interrupt. Reading a register will flush the status block.
2265 * When using MSI, the MSI message will always complete after
2266 * the status block write.
2267 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002268 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002269 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2270 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002271 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002272
2273 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2274 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2275 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2276
2277 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002278 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2279 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002280
Michael Chan73eef4c2005-08-25 15:39:15 -07002281 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002282
Michael Chan73eef4c2005-08-25 15:39:15 -07002283 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002284}
2285
Michael Chanf4e418f2005-11-04 08:53:48 -08002286static inline int
2287bnx2_has_work(struct bnx2 *bp)
2288{
2289 struct status_block *sblk = bp->status_blk;
2290
2291 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2292 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2293 return 1;
2294
Michael Chandb8b2252007-03-28 14:17:36 -07002295 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2296 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
Michael Chanf4e418f2005-11-04 08:53:48 -08002297 return 1;
2298
2299 return 0;
2300}
2301
Michael Chanb6016b72005-05-26 13:03:09 -07002302static int
2303bnx2_poll(struct net_device *dev, int *budget)
2304{
Michael Chan972ec0d2006-01-23 16:12:43 -08002305 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002306
Michael Chanb6016b72005-05-26 13:03:09 -07002307 if ((bp->status_blk->status_attn_bits &
2308 STATUS_ATTN_BITS_LINK_STATE) !=
2309 (bp->status_blk->status_attn_bits_ack &
2310 STATUS_ATTN_BITS_LINK_STATE)) {
2311
Michael Chanc770a652005-08-25 15:38:39 -07002312 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002313 bnx2_phy_int(bp);
Michael Chanc770a652005-08-25 15:38:39 -07002314 spin_unlock(&bp->phy_lock);
Michael Chanbf5295b2006-03-23 01:11:56 -08002315
 2316 		/* Trigger an immediate status block update (coalesce now,
 2317 		 * without an interrupt) so transient link-change state is not missed.
2318 */
2319 REG_WR(bp, BNX2_HC_COMMAND,
2320 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2321 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002322 }
2323
Michael Chanf4e418f2005-11-04 08:53:48 -08002324 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002325 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002326
Michael Chanf4e418f2005-11-04 08:53:48 -08002327 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002328 int orig_budget = *budget;
2329 int work_done;
2330
2331 if (orig_budget > dev->quota)
2332 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002333
Michael Chanb6016b72005-05-26 13:03:09 -07002334 work_done = bnx2_rx_int(bp, orig_budget);
2335 *budget -= work_done;
2336 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002337 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002338
Michael Chanf4e418f2005-11-04 08:53:48 -08002339 bp->last_status_idx = bp->status_blk->status_idx;
2340 rmb();
2341
2342 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002343 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002344 if (likely(bp->flags & USING_MSI_FLAG)) {
2345 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2346 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2347 bp->last_status_idx);
2348 return 0;
2349 }
Michael Chanb6016b72005-05-26 13:03:09 -07002350 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002351 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2352 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2353 bp->last_status_idx);
2354
2355 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2356 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2357 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002358 return 0;
2359 }
2360
2361 return 1;
2362}
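/* Note (illustrative comment): each poll pass handles at most
 * min(*budget, dev->quota) RX packets and subtracts the work done from
 * both counters; returning 0 (after netif_rx_complete() and re-enabling
 * interrupts via the ACK command writes) tells the NAPI core the device
 * is done, while returning 1 asks to be polled again.
 */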
2363
Herbert Xu932ff272006-06-09 12:20:56 -07002364/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002365 * from set_multicast.
2366 */
2367static void
2368bnx2_set_rx_mode(struct net_device *dev)
2369{
Michael Chan972ec0d2006-01-23 16:12:43 -08002370 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002371 u32 rx_mode, sort_mode;
2372 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002373
Michael Chanc770a652005-08-25 15:38:39 -07002374 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002375
2376 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2377 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2378 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2379#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002380 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002381 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002382#else
Michael Chane29054f2006-01-23 16:06:06 -08002383 if (!(bp->flags & ASF_ENABLE_FLAG))
2384 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002385#endif
2386 if (dev->flags & IFF_PROMISC) {
2387 /* Promiscuous mode. */
2388 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002389 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2390 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002391 }
2392 else if (dev->flags & IFF_ALLMULTI) {
2393 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2394 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2395 0xffffffff);
2396 }
2397 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2398 }
2399 else {
2400 /* Accept one or more multicast(s). */
2401 struct dev_mc_list *mclist;
2402 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2403 u32 regidx;
2404 u32 bit;
2405 u32 crc;
2406
2407 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2408
2409 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2410 i++, mclist = mclist->next) {
2411
2412 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2413 bit = crc & 0xff;
2414 regidx = (bit & 0xe0) >> 5;
2415 bit &= 0x1f;
2416 mc_filter[regidx] |= (1 << bit);
2417 }
2418
2419 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2420 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2421 mc_filter[i]);
2422 }
2423
2424 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2425 }
2426
2427 if (rx_mode != bp->rx_mode) {
2428 bp->rx_mode = rx_mode;
2429 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2430 }
2431
2432 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2433 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2434 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2435
Michael Chanc770a652005-08-25 15:38:39 -07002436 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002437}
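/* Worked example for the multicast hash above (illustrative comment): if
 * ether_crc_le() of a multicast address has low byte 0xa7, then
 * bit = 0xa7, regidx = (0xa7 & 0xe0) >> 5 = 5, bit &= 0x1f -> 7, so bit 7
 * of BNX2_EMAC_MULTICAST_HASH5 is set.  The eight 32-bit hash registers
 * together form a 256-bit filter indexed by the CRC's low byte.
 */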
2438
Michael Chanfba9fe92006-06-12 22:21:25 -07002439#define FW_BUF_SIZE 0x8000
2440
2441static int
2442bnx2_gunzip_init(struct bnx2 *bp)
2443{
2444 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2445 goto gunzip_nomem1;
2446
2447 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2448 goto gunzip_nomem2;
2449
2450 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2451 if (bp->strm->workspace == NULL)
2452 goto gunzip_nomem3;
2453
2454 return 0;
2455
2456gunzip_nomem3:
2457 kfree(bp->strm);
2458 bp->strm = NULL;
2459
2460gunzip_nomem2:
2461 vfree(bp->gunzip_buf);
2462 bp->gunzip_buf = NULL;
2463
2464gunzip_nomem1:
2465 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
 2466 		       "decompression.\n", bp->dev->name);
2467 return -ENOMEM;
2468}
2469
2470static void
2471bnx2_gunzip_end(struct bnx2 *bp)
2472{
2473 kfree(bp->strm->workspace);
2474
2475 kfree(bp->strm);
2476 bp->strm = NULL;
2477
2478 if (bp->gunzip_buf) {
2479 vfree(bp->gunzip_buf);
2480 bp->gunzip_buf = NULL;
2481 }
2482}
2483
2484static int
2485bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2486{
2487 int n, rc;
2488
2489 /* check gzip header */
2490 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2491 return -EINVAL;
2492
2493 n = 10;
2494
2495#define FNAME 0x8
2496 if (zbuf[3] & FNAME)
2497 while ((zbuf[n++] != 0) && (n < len));
2498
2499 bp->strm->next_in = zbuf + n;
2500 bp->strm->avail_in = len - n;
2501 bp->strm->next_out = bp->gunzip_buf;
2502 bp->strm->avail_out = FW_BUF_SIZE;
2503
2504 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2505 if (rc != Z_OK)
2506 return rc;
2507
2508 rc = zlib_inflate(bp->strm, Z_FINISH);
2509
2510 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2511 *outbuf = bp->gunzip_buf;
2512
2513 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2514 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2515 bp->dev->name, bp->strm->msg);
2516
2517 zlib_inflateEnd(bp->strm);
2518
2519 if (rc == Z_STREAM_END)
2520 return 0;
2521
2522 return rc;
2523}
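/* Note on the header handling above (illustrative comment, per RFC 1952):
 * bytes 0-1 are the gzip magic 0x1f 0x8b, byte 2 is the compression
 * method (Z_DEFLATED == 8) and byte 3 holds the flags.  The fixed header
 * is 10 bytes; when the FNAME flag (0x8) is set, a NUL-terminated file
 * name follows and is skipped by the loop above.  The raw deflate stream
 * is then inflated with a negative window size (-MAX_WBITS) so zlib does
 * not look for its own header.
 */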
2524
Michael Chanb6016b72005-05-26 13:03:09 -07002525static void
2526load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2527 u32 rv2p_proc)
2528{
2529 int i;
2530 u32 val;
2531
2532
2533 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002534 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002535 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002536 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002537 rv2p_code++;
2538
2539 if (rv2p_proc == RV2P_PROC1) {
2540 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2541 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2542 }
2543 else {
2544 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2545 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2546 }
2547 }
2548
2549 /* Reset the processor, un-stall is done later. */
2550 if (rv2p_proc == RV2P_PROC1) {
2551 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2552 }
2553 else {
2554 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2555 }
2556}
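/* Note (illustrative comment): each 64-bit RV2P instruction is written as
 * a high/low register pair and then committed to instruction slot i/8
 * through the processor's ADDR_CMD register; the processor is left in
 * reset here and un-stalled later during chip initialization.
 */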
2557
Michael Chanaf3ee512006-11-19 14:09:25 -08002558static int
Michael Chanb6016b72005-05-26 13:03:09 -07002559load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2560{
2561 u32 offset;
2562 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002563 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002564
2565 /* Halt the CPU. */
2566 val = REG_RD_IND(bp, cpu_reg->mode);
2567 val |= cpu_reg->mode_value_halt;
2568 REG_WR_IND(bp, cpu_reg->mode, val);
2569 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2570
2571 /* Load the Text area. */
2572 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002573 if (fw->gz_text) {
2574 u32 text_len;
2575 void *text;
2576
2577 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2578 &text_len);
2579 if (rc)
2580 return rc;
2581
2582 fw->text = text;
2583 }
2584 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002585 int j;
2586
2587 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002588 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002589 }
2590 }
2591
2592 /* Load the Data area. */
2593 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2594 if (fw->data) {
2595 int j;
2596
2597 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2598 REG_WR_IND(bp, offset, fw->data[j]);
2599 }
2600 }
2601
2602 /* Load the SBSS area. */
2603 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2604 if (fw->sbss) {
2605 int j;
2606
2607 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2608 REG_WR_IND(bp, offset, fw->sbss[j]);
2609 }
2610 }
2611
2612 /* Load the BSS area. */
2613 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2614 if (fw->bss) {
2615 int j;
2616
2617 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2618 REG_WR_IND(bp, offset, fw->bss[j]);
2619 }
2620 }
2621
2622 /* Load the Read-Only area. */
2623 offset = cpu_reg->spad_base +
2624 (fw->rodata_addr - cpu_reg->mips_view_base);
2625 if (fw->rodata) {
2626 int j;
2627
2628 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2629 REG_WR_IND(bp, offset, fw->rodata[j]);
2630 }
2631 }
2632
2633 /* Clear the pre-fetch instruction. */
2634 REG_WR_IND(bp, cpu_reg->inst, 0);
2635 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2636
2637 /* Start the CPU. */
2638 val = REG_RD_IND(bp, cpu_reg->mode);
2639 val &= ~cpu_reg->mode_value_halt;
2640 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2641 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002642
2643 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002644}
2645
Michael Chanfba9fe92006-06-12 22:21:25 -07002646static int
Michael Chanb6016b72005-05-26 13:03:09 -07002647bnx2_init_cpus(struct bnx2 *bp)
2648{
2649 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002650 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002651 int rc = 0;
2652 void *text;
2653 u32 text_len;
2654
2655 if ((rc = bnx2_gunzip_init(bp)) != 0)
2656 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002657
2658 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002659 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2660 &text_len);
2661 if (rc)
2662 goto init_cpu_err;
2663
2664 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2665
2666 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2667 &text_len);
2668 if (rc)
2669 goto init_cpu_err;
2670
2671 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002672
2673 /* Initialize the RX Processor. */
2674 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2675 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2676 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2677 cpu_reg.state = BNX2_RXP_CPU_STATE;
2678 cpu_reg.state_value_clear = 0xffffff;
2679 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2680 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2681 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2682 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2683 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2684 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2685 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002686
Michael Chand43584c2006-11-19 14:14:35 -08002687 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2688 fw = &bnx2_rxp_fw_09;
2689 else
2690 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002691
Michael Chanaf3ee512006-11-19 14:09:25 -08002692 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002693 if (rc)
2694 goto init_cpu_err;
2695
Michael Chanb6016b72005-05-26 13:03:09 -07002696 /* Initialize the TX Processor. */
2697 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2698 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2699 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2700 cpu_reg.state = BNX2_TXP_CPU_STATE;
2701 cpu_reg.state_value_clear = 0xffffff;
2702 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2703 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2704 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2705 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2706 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2707 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2708 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002709
Michael Chand43584c2006-11-19 14:14:35 -08002710 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2711 fw = &bnx2_txp_fw_09;
2712 else
2713 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002714
Michael Chanaf3ee512006-11-19 14:09:25 -08002715 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002716 if (rc)
2717 goto init_cpu_err;
2718
Michael Chanb6016b72005-05-26 13:03:09 -07002719 /* Initialize the TX Patch-up Processor. */
2720 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2721 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2722 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2723 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2724 cpu_reg.state_value_clear = 0xffffff;
2725 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2726 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2727 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2728 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2729 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2730 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2731 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002732
Michael Chand43584c2006-11-19 14:14:35 -08002733 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2734 fw = &bnx2_tpat_fw_09;
2735 else
2736 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002737
Michael Chanaf3ee512006-11-19 14:09:25 -08002738 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002739 if (rc)
2740 goto init_cpu_err;
2741
Michael Chanb6016b72005-05-26 13:03:09 -07002742 /* Initialize the Completion Processor. */
2743 cpu_reg.mode = BNX2_COM_CPU_MODE;
2744 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2745 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2746 cpu_reg.state = BNX2_COM_CPU_STATE;
2747 cpu_reg.state_value_clear = 0xffffff;
2748 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2749 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2750 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2751 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2752 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2753 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2754 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002755
Michael Chand43584c2006-11-19 14:14:35 -08002756 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2757 fw = &bnx2_com_fw_09;
2758 else
2759 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002760
Michael Chanaf3ee512006-11-19 14:09:25 -08002761 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002762 if (rc)
2763 goto init_cpu_err;
2764
Michael Chand43584c2006-11-19 14:14:35 -08002765 /* Initialize the Command Processor. */
2766 cpu_reg.mode = BNX2_CP_CPU_MODE;
2767 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2768 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2769 cpu_reg.state = BNX2_CP_CPU_STATE;
2770 cpu_reg.state_value_clear = 0xffffff;
2771 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2772 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2773 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2774 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2775 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2776 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2777 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002778
Michael Chand43584c2006-11-19 14:14:35 -08002779 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2780 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002781
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002782 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002783 if (rc)
2784 goto init_cpu_err;
2785 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002786init_cpu_err:
2787 bnx2_gunzip_end(bp);
2788 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002789}
2790
2791static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002792bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002793{
2794 u16 pmcsr;
2795
2796 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2797
2798 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002799 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002800 u32 val;
2801
2802 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2803 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2804 PCI_PM_CTRL_PME_STATUS);
2805
2806 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2807 /* delay required during transition out of D3hot */
2808 msleep(20);
2809
2810 val = REG_RD(bp, BNX2_EMAC_MODE);
2811 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2812 val &= ~BNX2_EMAC_MODE_MPKT;
2813 REG_WR(bp, BNX2_EMAC_MODE, val);
2814
2815 val = REG_RD(bp, BNX2_RPM_CONFIG);
2816 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2817 REG_WR(bp, BNX2_RPM_CONFIG, val);
2818 break;
2819 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002820 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002821 int i;
2822 u32 val, wol_msg;
2823
2824 if (bp->wol) {
2825 u32 advertising;
2826 u8 autoneg;
2827
2828 autoneg = bp->autoneg;
2829 advertising = bp->advertising;
2830
2831 bp->autoneg = AUTONEG_SPEED;
2832 bp->advertising = ADVERTISED_10baseT_Half |
2833 ADVERTISED_10baseT_Full |
2834 ADVERTISED_100baseT_Half |
2835 ADVERTISED_100baseT_Full |
2836 ADVERTISED_Autoneg;
2837
2838 bnx2_setup_copper_phy(bp);
2839
2840 bp->autoneg = autoneg;
2841 bp->advertising = advertising;
2842
2843 bnx2_set_mac_addr(bp);
2844
2845 val = REG_RD(bp, BNX2_EMAC_MODE);
2846
2847 /* Enable port mode. */
2848 val &= ~BNX2_EMAC_MODE_PORT;
2849 val |= BNX2_EMAC_MODE_PORT_MII |
2850 BNX2_EMAC_MODE_MPKT_RCVD |
2851 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002852 BNX2_EMAC_MODE_MPKT;
2853
2854 REG_WR(bp, BNX2_EMAC_MODE, val);
2855
2856 /* receive all multicast */
2857 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2858 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2859 0xffffffff);
2860 }
2861 REG_WR(bp, BNX2_EMAC_RX_MODE,
2862 BNX2_EMAC_RX_MODE_SORT_MODE);
2863
2864 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2865 BNX2_RPM_SORT_USER0_MC_EN;
2866 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2867 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2868 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2869 BNX2_RPM_SORT_USER0_ENA);
2870
2871 /* Need to enable EMAC and RPM for WOL. */
2872 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2873 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2874 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2875 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2876
2877 val = REG_RD(bp, BNX2_RPM_CONFIG);
2878 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2879 REG_WR(bp, BNX2_RPM_CONFIG, val);
2880
2881 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2882 }
2883 else {
2884 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2885 }
2886
Michael Chandda1e392006-01-23 16:08:14 -08002887 if (!(bp->flags & NO_WOL_FLAG))
2888 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002889
2890 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2891 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2892 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2893
2894 if (bp->wol)
2895 pmcsr |= 3;
2896 }
2897 else {
2898 pmcsr |= 3;
2899 }
2900 if (bp->wol) {
2901 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2902 }
2903 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2904 pmcsr);
2905
2906 /* No more memory access after this point until
2907 * device is brought back to D0.
2908 */
2909 udelay(50);
2910 break;
2911 }
2912 default:
2913 return -EINVAL;
2914 }
2915 return 0;
2916}
2917
2918static int
2919bnx2_acquire_nvram_lock(struct bnx2 *bp)
2920{
2921 u32 val;
2922 int j;
2923
2924 /* Request access to the flash interface. */
2925 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2926 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2927 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2928 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2929 break;
2930
2931 udelay(5);
2932 }
2933
2934 if (j >= NVRAM_TIMEOUT_COUNT)
2935 return -EBUSY;
2936
2937 return 0;
2938}
2939
2940static int
2941bnx2_release_nvram_lock(struct bnx2 *bp)
2942{
2943 int j;
2944 u32 val;
2945
2946 /* Relinquish nvram interface. */
2947 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2948
2949 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2950 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2951 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2952 break;
2953
2954 udelay(5);
2955 }
2956
2957 if (j >= NVRAM_TIMEOUT_COUNT)
2958 return -EBUSY;
2959
2960 return 0;
2961}
2962
2963
2964static int
2965bnx2_enable_nvram_write(struct bnx2 *bp)
2966{
2967 u32 val;
2968
2969 val = REG_RD(bp, BNX2_MISC_CFG);
2970 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2971
2972 if (!bp->flash_info->buffered) {
2973 int j;
2974
2975 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2976 REG_WR(bp, BNX2_NVM_COMMAND,
2977 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2978
2979 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2980 udelay(5);
2981
2982 val = REG_RD(bp, BNX2_NVM_COMMAND);
2983 if (val & BNX2_NVM_COMMAND_DONE)
2984 break;
2985 }
2986
2987 if (j >= NVRAM_TIMEOUT_COUNT)
2988 return -EBUSY;
2989 }
2990 return 0;
2991}
2992
2993static void
2994bnx2_disable_nvram_write(struct bnx2 *bp)
2995{
2996 u32 val;
2997
2998 val = REG_RD(bp, BNX2_MISC_CFG);
2999 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3000}
3001
3002
3003static void
3004bnx2_enable_nvram_access(struct bnx2 *bp)
3005{
3006 u32 val;
3007
3008 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3009 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003010 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003011 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3012}
3013
3014static void
3015bnx2_disable_nvram_access(struct bnx2 *bp)
3016{
3017 u32 val;
3018
3019 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3020 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003021 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003022 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3023 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3024}
3025
3026static int
3027bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3028{
3029 u32 cmd;
3030 int j;
3031
3032 if (bp->flash_info->buffered)
3033 /* Buffered flash, no erase needed */
3034 return 0;
3035
3036 /* Build an erase command */
3037 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3038 BNX2_NVM_COMMAND_DOIT;
3039
3040 /* Need to clear DONE bit separately. */
3041 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3042
 3043 	/* Address of the NVRAM page to erase. */
3044 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3045
3046 /* Issue an erase command. */
3047 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3048
3049 /* Wait for completion. */
3050 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3051 u32 val;
3052
3053 udelay(5);
3054
3055 val = REG_RD(bp, BNX2_NVM_COMMAND);
3056 if (val & BNX2_NVM_COMMAND_DONE)
3057 break;
3058 }
3059
3060 if (j >= NVRAM_TIMEOUT_COUNT)
3061 return -EBUSY;
3062
3063 return 0;
3064}
3065
3066static int
3067bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3068{
3069 u32 cmd;
3070 int j;
3071
3072 /* Build the command word. */
3073 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3074
 3075 	/* Translate the linear offset into a page/byte address for buffered flash. */
3076 if (bp->flash_info->buffered) {
3077 offset = ((offset / bp->flash_info->page_size) <<
3078 bp->flash_info->page_bits) +
3079 (offset % bp->flash_info->page_size);
3080 }
3081
3082 /* Need to clear DONE bit separately. */
3083 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3084
3085 /* Address of the NVRAM to read from. */
3086 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3087
3088 /* Issue a read command. */
3089 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3090
3091 /* Wait for completion. */
3092 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3093 u32 val;
3094
3095 udelay(5);
3096
3097 val = REG_RD(bp, BNX2_NVM_COMMAND);
3098 if (val & BNX2_NVM_COMMAND_DONE) {
3099 val = REG_RD(bp, BNX2_NVM_READ);
3100
3101 val = be32_to_cpu(val);
3102 memcpy(ret_val, &val, 4);
3103 break;
3104 }
3105 }
3106 if (j >= NVRAM_TIMEOUT_COUNT)
3107 return -EBUSY;
3108
3109 return 0;
3110}
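/* Worked example for the buffered-flash address translation above
 * (illustrative comment, geometry chosen for illustration only): with
 * page_size = 264 and page_bits = 9, a linear offset of 1000 becomes
 * ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744, i.e. the
 * page number moves into the device's page field while the byte offset
 * within the page stays in the low bits.
 */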
3111
3112
3113static int
3114bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3115{
3116 u32 cmd, val32;
3117 int j;
3118
3119 /* Build the command word. */
3120 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3121
 3122 	/* Translate the linear offset into a page/byte address for buffered flash. */
3123 if (bp->flash_info->buffered) {
3124 offset = ((offset / bp->flash_info->page_size) <<
3125 bp->flash_info->page_bits) +
3126 (offset % bp->flash_info->page_size);
3127 }
3128
3129 /* Need to clear DONE bit separately. */
3130 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3131
3132 memcpy(&val32, val, 4);
3133 val32 = cpu_to_be32(val32);
3134
3135 /* Write the data. */
3136 REG_WR(bp, BNX2_NVM_WRITE, val32);
3137
3138 /* Address of the NVRAM to write to. */
3139 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3140
3141 /* Issue the write command. */
3142 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3143
3144 /* Wait for completion. */
3145 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3146 udelay(5);
3147
3148 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3149 break;
3150 }
3151 if (j >= NVRAM_TIMEOUT_COUNT)
3152 return -EBUSY;
3153
3154 return 0;
3155}
3156
3157static int
3158bnx2_init_nvram(struct bnx2 *bp)
3159{
3160 u32 val;
3161 int j, entry_count, rc;
3162 struct flash_spec *flash;
3163
3164 /* Determine the selected interface. */
3165 val = REG_RD(bp, BNX2_NVM_CFG1);
3166
3167 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3168
3169 rc = 0;
3170 if (val & 0x40000000) {
3171
3172 /* Flash interface has been reconfigured */
3173 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003174 j++, flash++) {
3175 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3176 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003177 bp->flash_info = flash;
3178 break;
3179 }
3180 }
3181 }
3182 else {
Michael Chan37137702005-11-04 08:49:17 -08003183 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003184 /* Not yet been reconfigured */
3185
Michael Chan37137702005-11-04 08:49:17 -08003186 if (val & (1 << 23))
3187 mask = FLASH_BACKUP_STRAP_MASK;
3188 else
3189 mask = FLASH_STRAP_MASK;
3190
Michael Chanb6016b72005-05-26 13:03:09 -07003191 for (j = 0, flash = &flash_table[0]; j < entry_count;
3192 j++, flash++) {
3193
Michael Chan37137702005-11-04 08:49:17 -08003194 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003195 bp->flash_info = flash;
3196
3197 /* Request access to the flash interface. */
3198 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3199 return rc;
3200
3201 /* Enable access to flash interface */
3202 bnx2_enable_nvram_access(bp);
3203
3204 /* Reconfigure the flash interface */
3205 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3206 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3207 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3208 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3209
3210 /* Disable access to flash interface */
3211 bnx2_disable_nvram_access(bp);
3212 bnx2_release_nvram_lock(bp);
3213
3214 break;
3215 }
3216 }
3217 } /* if (val & 0x40000000) */
3218
3219 if (j == entry_count) {
3220 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003221 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003222 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003223 }
3224
Michael Chan1122db72006-01-23 16:11:42 -08003225 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3226 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3227 if (val)
3228 bp->flash_size = val;
3229 else
3230 bp->flash_size = bp->flash_info->total_size;
3231
Michael Chanb6016b72005-05-26 13:03:09 -07003232 return rc;
3233}
3234
3235static int
3236bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3237 int buf_size)
3238{
3239 int rc = 0;
3240 u32 cmd_flags, offset32, len32, extra;
3241
3242 if (buf_size == 0)
3243 return 0;
3244
3245 /* Request access to the flash interface. */
3246 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3247 return rc;
3248
3249 /* Enable access to flash interface */
3250 bnx2_enable_nvram_access(bp);
3251
3252 len32 = buf_size;
3253 offset32 = offset;
3254 extra = 0;
3255
3256 cmd_flags = 0;
3257
3258 if (offset32 & 3) {
3259 u8 buf[4];
3260 u32 pre_len;
3261
3262 offset32 &= ~3;
3263 pre_len = 4 - (offset & 3);
3264
3265 if (pre_len >= len32) {
3266 pre_len = len32;
3267 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3268 BNX2_NVM_COMMAND_LAST;
3269 }
3270 else {
3271 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3272 }
3273
3274 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3275
3276 if (rc)
3277 return rc;
3278
3279 memcpy(ret_buf, buf + (offset & 3), pre_len);
3280
3281 offset32 += 4;
3282 ret_buf += pre_len;
3283 len32 -= pre_len;
3284 }
3285 if (len32 & 3) {
3286 extra = 4 - (len32 & 3);
3287 len32 = (len32 + 4) & ~3;
3288 }
3289
3290 if (len32 == 4) {
3291 u8 buf[4];
3292
3293 if (cmd_flags)
3294 cmd_flags = BNX2_NVM_COMMAND_LAST;
3295 else
3296 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3297 BNX2_NVM_COMMAND_LAST;
3298
3299 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3300
3301 memcpy(ret_buf, buf, 4 - extra);
3302 }
3303 else if (len32 > 0) {
3304 u8 buf[4];
3305
3306 /* Read the first word. */
3307 if (cmd_flags)
3308 cmd_flags = 0;
3309 else
3310 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3311
3312 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3313
3314 /* Advance to the next dword. */
3315 offset32 += 4;
3316 ret_buf += 4;
3317 len32 -= 4;
3318
3319 while (len32 > 4 && rc == 0) {
3320 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3321
3322 /* Advance to the next dword. */
3323 offset32 += 4;
3324 ret_buf += 4;
3325 len32 -= 4;
3326 }
3327
3328 if (rc)
3329 return rc;
3330
3331 cmd_flags = BNX2_NVM_COMMAND_LAST;
3332 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3333
3334 memcpy(ret_buf, buf, 4 - extra);
3335 }
3336
3337 /* Disable access to flash interface */
3338 bnx2_disable_nvram_access(bp);
3339
3340 bnx2_release_nvram_lock(bp);
3341
3342 return rc;
3343}
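/* Worked example for the alignment handling above (illustrative comment):
 * a read of 9 bytes at offset 6 is carried out as three dword accesses:
 * the dword at 4 (last 2 bytes copied), the dword at 8 (all 4 bytes) and
 * the dword at 12 (first 3 bytes, extra = 1), so the caller still gets
 * exactly bytes 6..14 in ret_buf.
 */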
3344
3345static int
3346bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3347 int buf_size)
3348{
3349 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003350 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003351 int rc = 0;
3352 int align_start, align_end;
3353
3354 buf = data_buf;
3355 offset32 = offset;
3356 len32 = buf_size;
3357 align_start = align_end = 0;
3358
3359 if ((align_start = (offset32 & 3))) {
3360 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003361 len32 += align_start;
3362 if (len32 < 4)
3363 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003364 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3365 return rc;
3366 }
3367
3368 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003369 align_end = 4 - (len32 & 3);
3370 len32 += align_end;
3371 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3372 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003373 }
3374
3375 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003376 align_buf = kmalloc(len32, GFP_KERNEL);
3377 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003378 return -ENOMEM;
3379 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003380 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003381 }
3382 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003383 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003384 }
Michael Chane6be7632007-01-08 19:56:13 -08003385 memcpy(align_buf + align_start, data_buf, buf_size);
3386 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003387 }
3388
Michael Chanae181bc2006-05-22 16:39:20 -07003389 if (bp->flash_info->buffered == 0) {
3390 flash_buffer = kmalloc(264, GFP_KERNEL);
3391 if (flash_buffer == NULL) {
3392 rc = -ENOMEM;
3393 goto nvram_write_end;
3394 }
3395 }
3396
Michael Chanb6016b72005-05-26 13:03:09 -07003397 written = 0;
3398 while ((written < len32) && (rc == 0)) {
3399 u32 page_start, page_end, data_start, data_end;
3400 u32 addr, cmd_flags;
3401 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003402
3403 /* Find the page_start addr */
3404 page_start = offset32 + written;
3405 page_start -= (page_start % bp->flash_info->page_size);
3406 /* Find the page_end addr */
3407 page_end = page_start + bp->flash_info->page_size;
3408 /* Find the data_start addr */
3409 data_start = (written == 0) ? offset32 : page_start;
3410 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003411 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003412 (offset32 + len32) : page_end;
3413
3414 /* Request access to the flash interface. */
3415 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3416 goto nvram_write_end;
3417
3418 /* Enable access to flash interface */
3419 bnx2_enable_nvram_access(bp);
3420
3421 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3422 if (bp->flash_info->buffered == 0) {
3423 int j;
3424
3425 /* Read the whole page into the buffer
3426 * (non-buffer flash only) */
3427 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3428 if (j == (bp->flash_info->page_size - 4)) {
3429 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3430 }
3431 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003432 page_start + j,
3433 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003434 cmd_flags);
3435
3436 if (rc)
3437 goto nvram_write_end;
3438
3439 cmd_flags = 0;
3440 }
3441 }
3442
3443 /* Enable writes to flash interface (unlock write-protect) */
3444 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3445 goto nvram_write_end;
3446
Michael Chanb6016b72005-05-26 13:03:09 -07003447 /* Loop to write back the buffer data from page_start to
3448 * data_start */
3449 i = 0;
3450 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003451 /* Erase the page */
3452 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3453 goto nvram_write_end;
3454
3455 /* Re-enable the write again for the actual write */
3456 bnx2_enable_nvram_write(bp);
3457
Michael Chanb6016b72005-05-26 13:03:09 -07003458 for (addr = page_start; addr < data_start;
3459 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003460
Michael Chanb6016b72005-05-26 13:03:09 -07003461 rc = bnx2_nvram_write_dword(bp, addr,
3462 &flash_buffer[i], cmd_flags);
3463
3464 if (rc != 0)
3465 goto nvram_write_end;
3466
3467 cmd_flags = 0;
3468 }
3469 }
3470
3471 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003472 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003473 if ((addr == page_end - 4) ||
3474 ((bp->flash_info->buffered) &&
3475 (addr == data_end - 4))) {
3476
3477 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3478 }
3479 rc = bnx2_nvram_write_dword(bp, addr, buf,
3480 cmd_flags);
3481
3482 if (rc != 0)
3483 goto nvram_write_end;
3484
3485 cmd_flags = 0;
3486 buf += 4;
3487 }
3488
3489 /* Loop to write back the buffer data from data_end
3490 * to page_end */
3491 if (bp->flash_info->buffered == 0) {
3492 for (addr = data_end; addr < page_end;
3493 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003494
Michael Chanb6016b72005-05-26 13:03:09 -07003495 if (addr == page_end-4) {
3496 cmd_flags = BNX2_NVM_COMMAND_LAST;
3497 }
3498 rc = bnx2_nvram_write_dword(bp, addr,
3499 &flash_buffer[i], cmd_flags);
3500
3501 if (rc != 0)
3502 goto nvram_write_end;
3503
3504 cmd_flags = 0;
3505 }
3506 }
3507
3508 /* Disable writes to flash interface (lock write-protect) */
3509 bnx2_disable_nvram_write(bp);
3510
3511 /* Disable access to flash interface */
3512 bnx2_disable_nvram_access(bp);
3513 bnx2_release_nvram_lock(bp);
3514
3515 /* Increment written */
3516 written += data_end - data_start;
3517 }
3518
3519nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003520 kfree(flash_buffer);
3521 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003522 return rc;
3523}
3524
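/* Editor's note (not part of the original source): bnx2_reset_chip()
 * below quiesces the DMA engines, handshakes with the bootcode
 * (BNX2_DRV_MSG_DATA_WAIT0/WAIT1), leaves a reset signature in shared
 * memory so the firmware treats this as a soft reset, and then issues
 * the core reset -- through BNX2_MISC_COMMAND on the 5709 or through
 * BNX2_PCICFG_MISC_CONFIG on older chips, where completion is polled
 * for up to ~100 usec.  A sanity read of BNX2_PCI_SWAP_DIAG0 confirms
 * the byte-swap configuration survived the reset.
 */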
3525static int
3526bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3527{
3528 u32 val;
3529 int i, rc = 0;
3530
3531 /* Wait for the current PCI transaction to complete before
3532 * issuing a reset. */
3533 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3534 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3535 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3536 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3537 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3538 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3539 udelay(5);
3540
Michael Chanb090ae22006-01-23 16:07:10 -08003541 /* Wait for the firmware to tell us it is ok to issue a reset. */
3542 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3543
Michael Chanb6016b72005-05-26 13:03:09 -07003544 /* Deposit a driver reset signature so the firmware knows that
3545 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003546 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003547 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3548
Michael Chanb6016b72005-05-26 13:03:09 -07003549	/* Do a dummy read to force the chip to complete all current transactions
3550 * before we issue a reset. */
3551 val = REG_RD(bp, BNX2_MISC_ID);
3552
Michael Chan234754d2006-11-19 14:11:41 -08003553 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3554 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3555 REG_RD(bp, BNX2_MISC_COMMAND);
3556 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003557
Michael Chan234754d2006-11-19 14:11:41 -08003558 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3559 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003560
Michael Chan234754d2006-11-19 14:11:41 -08003561 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003562
Michael Chan234754d2006-11-19 14:11:41 -08003563 } else {
3564 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3565 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3566 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3567
3568 /* Chip reset. */
3569 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3570
3571 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3572 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3573 current->state = TASK_UNINTERRUPTIBLE;
3574 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003575 }
Michael Chanb6016b72005-05-26 13:03:09 -07003576
Michael Chan234754d2006-11-19 14:11:41 -08003577		/* Reset takes approximately 30 usec */
3578 for (i = 0; i < 10; i++) {
3579 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3580 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3581 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3582 break;
3583 udelay(10);
3584 }
3585
3586 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3587 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3588 printk(KERN_ERR PFX "Chip reset did not complete\n");
3589 return -EBUSY;
3590 }
Michael Chanb6016b72005-05-26 13:03:09 -07003591 }
3592
3593 /* Make sure byte swapping is properly configured. */
3594 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3595 if (val != 0x01020304) {
3596 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3597 return -ENODEV;
3598 }
3599
Michael Chanb6016b72005-05-26 13:03:09 -07003600 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003601 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3602 if (rc)
3603 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003604
3605 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3606		/* Adjust the voltage regulator to two steps lower. The default
3607 * of this register is 0x0000000e. */
3608 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3609
3610 /* Remove bad rbuf memory from the free pool. */
3611 rc = bnx2_alloc_bad_rbuf(bp);
3612 }
3613
3614 return rc;
3615}
3616
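/* Editor's note (not part of the original source): bnx2_init_chip()
 * below reprograms the device after a reset: DMA byte/word swapping,
 * context memory, on-chip CPU firmware (bnx2_init_cpus), NVRAM state,
 * the MAC address, the kernel-bypass MQ window, ring page sizes, the
 * EMAC backoff seed and MTU, and the host-coalescing thresholds,
 * before telling the bootcode (WAIT2 | RESET) that initialization is
 * done and enabling the remaining blocks.
 */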
3617static int
3618bnx2_init_chip(struct bnx2 *bp)
3619{
3620 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003621 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003622
3623 /* Make sure the interrupt is not active. */
3624 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3625
3626 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3627 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3628#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003629 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003630#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003631 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003632 DMA_READ_CHANS << 12 |
3633 DMA_WRITE_CHANS << 16;
3634
3635 val |= (0x2 << 20) | (1 << 11);
3636
Michael Chandda1e392006-01-23 16:08:14 -08003637 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003638 val |= (1 << 23);
3639
3640 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3641 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3642 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3643
3644 REG_WR(bp, BNX2_DMA_CONFIG, val);
3645
3646 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3647 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3648 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3649 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3650 }
3651
3652 if (bp->flags & PCIX_FLAG) {
3653 u16 val16;
3654
3655 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3656 &val16);
3657 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3658 val16 & ~PCI_X_CMD_ERO);
3659 }
3660
3661 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3662 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3663 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3664 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3665
3666 /* Initialize context mapping and zero out the quick contexts. The
3667 * context block must have already been enabled. */
Michael Chan59b47d82006-11-19 14:10:45 -08003668 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3669 bnx2_init_5709_context(bp);
3670 else
3671 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003672
Michael Chanfba9fe92006-06-12 22:21:25 -07003673 if ((rc = bnx2_init_cpus(bp)) != 0)
3674 return rc;
3675
Michael Chanb6016b72005-05-26 13:03:09 -07003676 bnx2_init_nvram(bp);
3677
3678 bnx2_set_mac_addr(bp);
3679
3680 val = REG_RD(bp, BNX2_MQ_CONFIG);
3681 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3682 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003683 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3684 val |= BNX2_MQ_CONFIG_HALT_DIS;
3685
Michael Chanb6016b72005-05-26 13:03:09 -07003686 REG_WR(bp, BNX2_MQ_CONFIG, val);
3687
3688 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3689 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3690 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3691
3692 val = (BCM_PAGE_BITS - 8) << 24;
3693 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3694
3695 /* Configure page size. */
3696 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3697 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3698 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3699 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3700
3701 val = bp->mac_addr[0] +
3702 (bp->mac_addr[1] << 8) +
3703 (bp->mac_addr[2] << 16) +
3704 bp->mac_addr[3] +
3705 (bp->mac_addr[4] << 8) +
3706 (bp->mac_addr[5] << 16);
3707 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3708
3709 /* Program the MTU. Also include 4 bytes for CRC32. */
3710 val = bp->dev->mtu + ETH_HLEN + 4;
3711 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3712 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3713 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3714
3715 bp->last_status_idx = 0;
3716 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3717
3718 /* Set up how to generate a link change interrupt. */
3719 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3720
3721 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3722 (u64) bp->status_blk_mapping & 0xffffffff);
3723 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3724
3725 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3726 (u64) bp->stats_blk_mapping & 0xffffffff);
3727 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3728 (u64) bp->stats_blk_mapping >> 32);
3729
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003730 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003731 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3732
3733 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3734 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3735
3736 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3737 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3738
3739 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3740
3741 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3742
3743 REG_WR(bp, BNX2_HC_COM_TICKS,
3744 (bp->com_ticks_int << 16) | bp->com_ticks);
3745
3746 REG_WR(bp, BNX2_HC_CMD_TICKS,
3747 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3748
3749 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3750 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3751
3752 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3753 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3754 else {
3755 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3756 BNX2_HC_CONFIG_TX_TMR_MODE |
3757 BNX2_HC_CONFIG_COLLECT_STATS);
3758 }
3759
3760 /* Clear internal stats counters. */
3761 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3762
3763 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3764
Michael Chane29054f2006-01-23 16:06:06 -08003765 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3766 BNX2_PORT_FEATURE_ASF_ENABLED)
3767 bp->flags |= ASF_ENABLE_FLAG;
3768
Michael Chanb6016b72005-05-26 13:03:09 -07003769 /* Initialize the receive filter. */
3770 bnx2_set_rx_mode(bp->dev);
3771
Michael Chanb090ae22006-01-23 16:07:10 -08003772 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3773 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003774
3775 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3776 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3777
3778 udelay(20);
3779
Michael Chanbf5295b2006-03-23 01:11:56 -08003780 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3781
Michael Chanb090ae22006-01-23 16:07:10 -08003782 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003783}
3784
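/* Editor's note (not part of the original source): bnx2_init_tx_context()
 * below fills in the L2 TX context for a connection ID: the context
 * type, the command type, and the high/low halves of the TX BD chain
 * address.  The 5709 uses a different set of context offsets, hence
 * the *_XI variants.
 */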
Michael Chan59b47d82006-11-19 14:10:45 -08003785static void
3786bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3787{
3788 u32 val, offset0, offset1, offset2, offset3;
3789
3790 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3791 offset0 = BNX2_L2CTX_TYPE_XI;
3792 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3793 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3794 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3795 } else {
3796 offset0 = BNX2_L2CTX_TYPE;
3797 offset1 = BNX2_L2CTX_CMD_TYPE;
3798 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3799 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3800 }
3801 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3802 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3803
3804 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3805 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3806
3807 val = (u64) bp->tx_desc_mapping >> 32;
3808 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3809
3810 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3811 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3812}
Michael Chanb6016b72005-05-26 13:03:09 -07003813
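/* Editor's note (not part of the original source): bnx2_init_tx_ring()
 * below makes the single TX page a ring by pointing its last BD back
 * at the base of the descriptor area, clears the producer/consumer
 * state, caches the BIDX/BSEQ mailbox addresses for TX_CID, and
 * programs the TX context.  tx_wake_thresh (half the ring) is the
 * free-descriptor level at which the queue is woken again.
 */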
3814static void
3815bnx2_init_tx_ring(struct bnx2 *bp)
3816{
3817 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003818 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003819
Michael Chan2f8af122006-08-15 01:39:10 -07003820 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3821
Michael Chanb6016b72005-05-26 13:03:09 -07003822 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003823
Michael Chanb6016b72005-05-26 13:03:09 -07003824 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3825 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3826
3827 bp->tx_prod = 0;
3828 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003829 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003830 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003831
Michael Chan59b47d82006-11-19 14:10:45 -08003832 cid = TX_CID;
3833 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3834 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003835
Michael Chan59b47d82006-11-19 14:10:45 -08003836 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003837}
3838
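/* Editor's note (not part of the original source): bnx2_init_rx_ring()
 * below sizes each RX buffer as MTU + ETH_HLEN + rx_offset + 8 (CRC
 * and VLAN tag) plus hardware alignment, links the RX BD pages into a
 * circular chain, writes the RX context, and pre-fills the ring with
 * up to rx_ring_size skbs before publishing the initial producer
 * index and byte sequence through the RX mailbox.
 */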
3839static void
3840bnx2_init_rx_ring(struct bnx2 *bp)
3841{
3842 struct rx_bd *rxbd;
3843 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003844 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003845 u32 val;
3846
3847 /* 8 for CRC and VLAN */
3848 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003849 /* hw alignment */
3850 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003851
3852 ring_prod = prod = bp->rx_prod = 0;
3853 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003854 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003855 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003856
Michael Chan13daffa2006-03-20 17:49:20 -08003857 for (i = 0; i < bp->rx_max_ring; i++) {
3858 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003859
Michael Chan13daffa2006-03-20 17:49:20 -08003860 rxbd = &bp->rx_desc_ring[i][0];
3861 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3862 rxbd->rx_bd_len = bp->rx_buf_use_size;
3863 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3864 }
3865 if (i == (bp->rx_max_ring - 1))
3866 j = 0;
3867 else
3868 j = i + 1;
3869 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3870 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3871 0xffffffff;
3872 }
Michael Chanb6016b72005-05-26 13:03:09 -07003873
3874 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3875 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3876 val |= 0x02 << 8;
3877 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3878
Michael Chan13daffa2006-03-20 17:49:20 -08003879 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003880 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3881
Michael Chan13daffa2006-03-20 17:49:20 -08003882 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003883 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3884
Michael Chan236b6392006-03-20 17:49:02 -08003885 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003886 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3887 break;
3888 }
3889 prod = NEXT_RX_BD(prod);
3890 ring_prod = RX_RING_IDX(prod);
3891 }
3892 bp->rx_prod = prod;
3893
3894 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3895
3896 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3897}
3898
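/* Editor's note (not part of the original source): bnx2_set_rx_ring_size()
 * below converts a requested descriptor count into a number of RX BD
 * pages, rounded up to the next power of two and capped at
 * MAX_RX_RINGS, and derives rx_max_ring_idx from it.  For example, a
 * request slightly larger than one page's worth of descriptors ends
 * up using two pages.
 */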
3899static void
Michael Chan13daffa2006-03-20 17:49:20 -08003900bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3901{
3902 u32 num_rings, max;
3903
3904 bp->rx_ring_size = size;
3905 num_rings = 1;
3906 while (size > MAX_RX_DESC_CNT) {
3907 size -= MAX_RX_DESC_CNT;
3908 num_rings++;
3909 }
3910 /* round to next power of 2 */
3911 max = MAX_RX_RINGS;
3912 while ((max & num_rings) == 0)
3913 max >>= 1;
3914
3915 if (num_rings != max)
3916 max <<= 1;
3917
3918 bp->rx_max_ring = max;
3919 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3920}
3921
3922static void
Michael Chanb6016b72005-05-26 13:03:09 -07003923bnx2_free_tx_skbs(struct bnx2 *bp)
3924{
3925 int i;
3926
3927 if (bp->tx_buf_ring == NULL)
3928 return;
3929
3930 for (i = 0; i < TX_DESC_CNT; ) {
3931 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3932 struct sk_buff *skb = tx_buf->skb;
3933 int j, last;
3934
3935 if (skb == NULL) {
3936 i++;
3937 continue;
3938 }
3939
3940 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3941 skb_headlen(skb), PCI_DMA_TODEVICE);
3942
3943 tx_buf->skb = NULL;
3944
3945 last = skb_shinfo(skb)->nr_frags;
3946 for (j = 0; j < last; j++) {
3947 tx_buf = &bp->tx_buf_ring[i + j + 1];
3948 pci_unmap_page(bp->pdev,
3949 pci_unmap_addr(tx_buf, mapping),
3950 skb_shinfo(skb)->frags[j].size,
3951 PCI_DMA_TODEVICE);
3952 }
Michael Chan745720e2006-06-29 12:37:41 -07003953 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003954 i += j + 1;
3955 }
3956
3957}
3958
3959static void
3960bnx2_free_rx_skbs(struct bnx2 *bp)
3961{
3962 int i;
3963
3964 if (bp->rx_buf_ring == NULL)
3965 return;
3966
Michael Chan13daffa2006-03-20 17:49:20 -08003967 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003968 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3969 struct sk_buff *skb = rx_buf->skb;
3970
Michael Chan05d0f1c2005-11-04 08:53:48 -08003971 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003972 continue;
3973
3974 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3975 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3976
3977 rx_buf->skb = NULL;
3978
Michael Chan745720e2006-06-29 12:37:41 -07003979 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003980 }
3981}
3982
3983static void
3984bnx2_free_skbs(struct bnx2 *bp)
3985{
3986 bnx2_free_tx_skbs(bp);
3987 bnx2_free_rx_skbs(bp);
3988}
3989
3990static int
3991bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3992{
3993 int rc;
3994
3995 rc = bnx2_reset_chip(bp, reset_code);
3996 bnx2_free_skbs(bp);
3997 if (rc)
3998 return rc;
3999
Michael Chanfba9fe92006-06-12 22:21:25 -07004000 if ((rc = bnx2_init_chip(bp)) != 0)
4001 return rc;
4002
Michael Chanb6016b72005-05-26 13:03:09 -07004003 bnx2_init_tx_ring(bp);
4004 bnx2_init_rx_ring(bp);
4005 return 0;
4006}
4007
4008static int
4009bnx2_init_nic(struct bnx2 *bp)
4010{
4011 int rc;
4012
4013 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4014 return rc;
4015
Michael Chan80be4432006-11-19 14:07:28 -08004016 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004017 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004018 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004019 bnx2_set_link(bp);
4020 return 0;
4021}
4022
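/* Editor's note (not part of the original source): the ethtool
 * register test below walks reg_tbl[], checking that writable bits
 * (rw_mask) can be cleared and set through the memory-mapped window
 * while read-only bits (ro_mask) keep their saved value; the original
 * contents are restored afterwards.  Entries flagged BNX2_FL_NOT_5709
 * are skipped on the 5709.
 */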
4023static int
4024bnx2_test_registers(struct bnx2 *bp)
4025{
4026 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004027 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004028 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004029 u16 offset;
4030 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004031#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004032 u32 rw_mask;
4033 u32 ro_mask;
4034 } reg_tbl[] = {
4035 { 0x006c, 0, 0x00000000, 0x0000003f },
4036 { 0x0090, 0, 0xffffffff, 0x00000000 },
4037 { 0x0094, 0, 0x00000000, 0x00000000 },
4038
Michael Chan5bae30c2007-05-03 13:18:46 -07004039 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4040 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4041 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4042 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4043 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4044 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4045 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4046 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4047 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004048
Michael Chan5bae30c2007-05-03 13:18:46 -07004049 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4050 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4051 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4052 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4053 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4054 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004055
Michael Chan5bae30c2007-05-03 13:18:46 -07004056 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4057 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4058 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004059
4060 { 0x1000, 0, 0x00000000, 0x00000001 },
4061 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004062
4063 { 0x1408, 0, 0x01c00800, 0x00000000 },
4064 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4065 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004066 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004067 { 0x14b0, 0, 0x00000002, 0x00000001 },
4068 { 0x14b8, 0, 0x00000000, 0x00000000 },
4069 { 0x14c0, 0, 0x00000000, 0x00000009 },
4070 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4071 { 0x14cc, 0, 0x00000000, 0x00000001 },
4072 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004073
4074 { 0x1800, 0, 0x00000000, 0x00000001 },
4075 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004076
4077 { 0x2800, 0, 0x00000000, 0x00000001 },
4078 { 0x2804, 0, 0x00000000, 0x00003f01 },
4079 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4080 { 0x2810, 0, 0xffff0000, 0x00000000 },
4081 { 0x2814, 0, 0xffff0000, 0x00000000 },
4082 { 0x2818, 0, 0xffff0000, 0x00000000 },
4083 { 0x281c, 0, 0xffff0000, 0x00000000 },
4084 { 0x2834, 0, 0xffffffff, 0x00000000 },
4085 { 0x2840, 0, 0x00000000, 0xffffffff },
4086 { 0x2844, 0, 0x00000000, 0xffffffff },
4087 { 0x2848, 0, 0xffffffff, 0x00000000 },
4088 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4089
4090 { 0x2c00, 0, 0x00000000, 0x00000011 },
4091 { 0x2c04, 0, 0x00000000, 0x00030007 },
4092
Michael Chanb6016b72005-05-26 13:03:09 -07004093 { 0x3c00, 0, 0x00000000, 0x00000001 },
4094 { 0x3c04, 0, 0x00000000, 0x00070000 },
4095 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4096 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4097 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4098 { 0x3c14, 0, 0x00000000, 0xffffffff },
4099 { 0x3c18, 0, 0x00000000, 0xffffffff },
4100 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4101 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004102
4103 { 0x5004, 0, 0x00000000, 0x0000007f },
4104 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004105
Michael Chanb6016b72005-05-26 13:03:09 -07004106 { 0x5c00, 0, 0x00000000, 0x00000001 },
4107 { 0x5c04, 0, 0x00000000, 0x0003000f },
4108 { 0x5c08, 0, 0x00000003, 0x00000000 },
4109 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4110 { 0x5c10, 0, 0x00000000, 0xffffffff },
4111 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4112 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4113 { 0x5c88, 0, 0x00000000, 0x00077373 },
4114 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4115
4116 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4117 { 0x680c, 0, 0xffffffff, 0x00000000 },
4118 { 0x6810, 0, 0xffffffff, 0x00000000 },
4119 { 0x6814, 0, 0xffffffff, 0x00000000 },
4120 { 0x6818, 0, 0xffffffff, 0x00000000 },
4121 { 0x681c, 0, 0xffffffff, 0x00000000 },
4122 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4123 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4124 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4125 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4126 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4127 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4128 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4129 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4130 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4131 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4132 { 0x684c, 0, 0xffffffff, 0x00000000 },
4133 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4134 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4135 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4136 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4137 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4138 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4139
4140 { 0xffff, 0, 0x00000000, 0x00000000 },
4141 };
4142
4143 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004144 is_5709 = 0;
4145 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4146 is_5709 = 1;
4147
Michael Chanb6016b72005-05-26 13:03:09 -07004148 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4149 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004150 u16 flags = reg_tbl[i].flags;
4151
4152 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4153 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004154
4155 offset = (u32) reg_tbl[i].offset;
4156 rw_mask = reg_tbl[i].rw_mask;
4157 ro_mask = reg_tbl[i].ro_mask;
4158
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004159 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004160
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004161 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004162
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004163 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004164 if ((val & rw_mask) != 0) {
4165 goto reg_test_err;
4166 }
4167
4168 if ((val & ro_mask) != (save_val & ro_mask)) {
4169 goto reg_test_err;
4170 }
4171
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004172 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004173
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004174 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004175 if ((val & rw_mask) != rw_mask) {
4176 goto reg_test_err;
4177 }
4178
4179 if ((val & ro_mask) != (save_val & ro_mask)) {
4180 goto reg_test_err;
4181 }
4182
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004183 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004184 continue;
4185
4186reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004187 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004188 ret = -ENODEV;
4189 break;
4190 }
4191 return ret;
4192}
4193
4194static int
4195bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4196{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004197 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004198 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4199 int i;
4200
4201 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4202 u32 offset;
4203
4204 for (offset = 0; offset < size; offset += 4) {
4205
4206 REG_WR_IND(bp, start + offset, test_pattern[i]);
4207
4208 if (REG_RD_IND(bp, start + offset) !=
4209 test_pattern[i]) {
4210 return -ENODEV;
4211 }
4212 }
4213 }
4214 return 0;
4215}
4216
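/* Editor's note (not part of the original source): bnx2_test_memory()
 * below runs bnx2_do_mem_test() -- an indirect-register write and
 * readback of several test patterns -- over each on-chip memory
 * region in the chip-specific table (the 5709 table omits the
 * 0x160000 region).
 */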
4217static int
4218bnx2_test_memory(struct bnx2 *bp)
4219{
4220 int ret = 0;
4221 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004222 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004223 u32 offset;
4224 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004225 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004226 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004227 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004228 { 0xe0000, 0x4000 },
4229 { 0x120000, 0x4000 },
4230 { 0x1a0000, 0x4000 },
4231 { 0x160000, 0x4000 },
4232 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004233 },
4234 mem_tbl_5709[] = {
4235 { 0x60000, 0x4000 },
4236 { 0xa0000, 0x3000 },
4237 { 0xe0000, 0x4000 },
4238 { 0x120000, 0x4000 },
4239 { 0x1a0000, 0x4000 },
4240 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004241 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004242 struct mem_entry *mem_tbl;
4243
4244 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4245 mem_tbl = mem_tbl_5709;
4246 else
4247 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004248
4249 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4250 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4251 mem_tbl[i].len)) != 0) {
4252 return ret;
4253 }
4254 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004255
Michael Chanb6016b72005-05-26 13:03:09 -07004256 return ret;
4257}
4258
Michael Chanbc5a0692006-01-23 16:13:22 -08004259#define BNX2_MAC_LOOPBACK 0
4260#define BNX2_PHY_LOOPBACK 1
4261
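/* Editor's note (not part of the original source): bnx2_run_loopback()
 * below builds a 1514-byte frame addressed to the device's own MAC,
 * places it directly on the TX ring, forces an immediate coalescing
 * pass (COAL_NOW_WO_INT), and then verifies that exactly one packet
 * came back on the RX ring with a clean l2_fhdr status, the expected
 * length, and an intact payload pattern.  The caller selects MAC- or
 * PHY-level loopback via the mode argument.
 */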
Michael Chanb6016b72005-05-26 13:03:09 -07004262static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004263bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004264{
4265 unsigned int pkt_size, num_pkts, i;
4266 struct sk_buff *skb, *rx_skb;
4267 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004268 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004269 dma_addr_t map;
4270 struct tx_bd *txbd;
4271 struct sw_bd *rx_buf;
4272 struct l2_fhdr *rx_hdr;
4273 int ret = -ENODEV;
4274
Michael Chanbc5a0692006-01-23 16:13:22 -08004275 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4276 bp->loopback = MAC_LOOPBACK;
4277 bnx2_set_mac_loopback(bp);
4278 }
4279 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004280 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004281 bnx2_set_phy_loopback(bp);
4282 }
4283 else
4284 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004285
4286 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004287 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004288 if (!skb)
4289 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004290 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004291 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004292 memset(packet + 6, 0x0, 8);
4293 for (i = 14; i < pkt_size; i++)
4294 packet[i] = (unsigned char) (i & 0xff);
4295
4296 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4297 PCI_DMA_TODEVICE);
4298
Michael Chanbf5295b2006-03-23 01:11:56 -08004299 REG_WR(bp, BNX2_HC_COMMAND,
4300 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4301
Michael Chanb6016b72005-05-26 13:03:09 -07004302 REG_RD(bp, BNX2_HC_COMMAND);
4303
4304 udelay(5);
4305 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4306
Michael Chanb6016b72005-05-26 13:03:09 -07004307 num_pkts = 0;
4308
Michael Chanbc5a0692006-01-23 16:13:22 -08004309 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004310
4311 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4312 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4313 txbd->tx_bd_mss_nbytes = pkt_size;
4314 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4315
4316 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004317 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4318 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004319
Michael Chan234754d2006-11-19 14:11:41 -08004320 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4321 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004322
4323 udelay(100);
4324
Michael Chanbf5295b2006-03-23 01:11:56 -08004325 REG_WR(bp, BNX2_HC_COMMAND,
4326 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4327
Michael Chanb6016b72005-05-26 13:03:09 -07004328 REG_RD(bp, BNX2_HC_COMMAND);
4329
4330 udelay(5);
4331
4332 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004333 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004334
Michael Chanbc5a0692006-01-23 16:13:22 -08004335 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004336 goto loopback_test_done;
4337 }
4338
4339 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4340 if (rx_idx != rx_start_idx + num_pkts) {
4341 goto loopback_test_done;
4342 }
4343
4344 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4345 rx_skb = rx_buf->skb;
4346
4347 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4348 skb_reserve(rx_skb, bp->rx_offset);
4349
4350 pci_dma_sync_single_for_cpu(bp->pdev,
4351 pci_unmap_addr(rx_buf, mapping),
4352 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4353
Michael Chanade2bfe2006-01-23 16:09:51 -08004354 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004355 (L2_FHDR_ERRORS_BAD_CRC |
4356 L2_FHDR_ERRORS_PHY_DECODE |
4357 L2_FHDR_ERRORS_ALIGNMENT |
4358 L2_FHDR_ERRORS_TOO_SHORT |
4359 L2_FHDR_ERRORS_GIANT_FRAME)) {
4360
4361 goto loopback_test_done;
4362 }
4363
4364 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4365 goto loopback_test_done;
4366 }
4367
4368 for (i = 14; i < pkt_size; i++) {
4369 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4370 goto loopback_test_done;
4371 }
4372 }
4373
4374 ret = 0;
4375
4376loopback_test_done:
4377 bp->loopback = 0;
4378 return ret;
4379}
4380
Michael Chanbc5a0692006-01-23 16:13:22 -08004381#define BNX2_MAC_LOOPBACK_FAILED 1
4382#define BNX2_PHY_LOOPBACK_FAILED 2
4383#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4384 BNX2_PHY_LOOPBACK_FAILED)
4385
4386static int
4387bnx2_test_loopback(struct bnx2 *bp)
4388{
4389 int rc = 0;
4390
4391 if (!netif_running(bp->dev))
4392 return BNX2_LOOPBACK_FAILED;
4393
4394 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4395 spin_lock_bh(&bp->phy_lock);
4396 bnx2_init_phy(bp);
4397 spin_unlock_bh(&bp->phy_lock);
4398 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4399 rc |= BNX2_MAC_LOOPBACK_FAILED;
4400 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4401 rc |= BNX2_PHY_LOOPBACK_FAILED;
4402 return rc;
4403}
4404
Michael Chanb6016b72005-05-26 13:03:09 -07004405#define NVRAM_SIZE 0x200
4406#define CRC32_RESIDUAL 0xdebb20e3
4407
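/* Editor's note (not part of the original source): bnx2_test_nvram()
 * below checks the 0x669955aa signature in the first NVRAM dword and
 * then verifies the two 0x100-byte halves of the 0x200-byte block at
 * offset 0x100 against the standard CRC32 residual (0xdebb20e3).
 */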
4408static int
4409bnx2_test_nvram(struct bnx2 *bp)
4410{
4411 u32 buf[NVRAM_SIZE / 4];
4412 u8 *data = (u8 *) buf;
4413 int rc = 0;
4414 u32 magic, csum;
4415
4416 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4417 goto test_nvram_done;
4418
4419 magic = be32_to_cpu(buf[0]);
4420 if (magic != 0x669955aa) {
4421 rc = -ENODEV;
4422 goto test_nvram_done;
4423 }
4424
4425 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4426 goto test_nvram_done;
4427
4428 csum = ether_crc_le(0x100, data);
4429 if (csum != CRC32_RESIDUAL) {
4430 rc = -ENODEV;
4431 goto test_nvram_done;
4432 }
4433
4434 csum = ether_crc_le(0x100, data + 0x100);
4435 if (csum != CRC32_RESIDUAL) {
4436 rc = -ENODEV;
4437 }
4438
4439test_nvram_done:
4440 return rc;
4441}
4442
4443static int
4444bnx2_test_link(struct bnx2 *bp)
4445{
4446 u32 bmsr;
4447
Michael Chanc770a652005-08-25 15:38:39 -07004448 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004449 bnx2_enable_bmsr1(bp);
4450 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4451 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4452 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004453 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004454
Michael Chanb6016b72005-05-26 13:03:09 -07004455 if (bmsr & BMSR_LSTATUS) {
4456 return 0;
4457 }
4458 return -ENODEV;
4459}
4460
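/* Editor's note (not part of the original source): bnx2_test_intr()
 * below latches the current status index, forces an interrupt with
 * BNX2_HC_COMMAND_COAL_NOW, and polls for up to ~100 ms for the index
 * in BNX2_PCICFG_INT_ACK_CMD to change.  bnx2_open() uses it to
 * verify that MSI delivery actually works before committing to MSI.
 */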
4461static int
4462bnx2_test_intr(struct bnx2 *bp)
4463{
4464 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004465 u16 status_idx;
4466
4467 if (!netif_running(bp->dev))
4468 return -ENODEV;
4469
4470 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4471
4472 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004473 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004474 REG_RD(bp, BNX2_HC_COMMAND);
4475
4476 for (i = 0; i < 10; i++) {
4477 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4478 status_idx) {
4479
4480 break;
4481 }
4482
4483 msleep_interruptible(10);
4484 }
4485 if (i < 10)
4486 return 0;
4487
4488 return -ENODEV;
4489}
4490
4491static void
Michael Chan48b01e22006-11-19 14:08:00 -08004492bnx2_5706_serdes_timer(struct bnx2 *bp)
4493{
4494 spin_lock(&bp->phy_lock);
4495 if (bp->serdes_an_pending)
4496 bp->serdes_an_pending--;
4497 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4498 u32 bmcr;
4499
4500 bp->current_interval = bp->timer_interval;
4501
Michael Chanca58c3a2007-05-03 13:22:52 -07004502 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004503
4504 if (bmcr & BMCR_ANENABLE) {
4505 u32 phy1, phy2;
4506
4507 bnx2_write_phy(bp, 0x1c, 0x7c00);
4508 bnx2_read_phy(bp, 0x1c, &phy1);
4509
4510 bnx2_write_phy(bp, 0x17, 0x0f01);
4511 bnx2_read_phy(bp, 0x15, &phy2);
4512 bnx2_write_phy(bp, 0x17, 0x0f01);
4513 bnx2_read_phy(bp, 0x15, &phy2);
4514
4515 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4516 !(phy2 & 0x20)) { /* no CONFIG */
4517
4518 bmcr &= ~BMCR_ANENABLE;
4519 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004520 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004521 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4522 }
4523 }
4524 }
4525 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4526 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4527 u32 phy2;
4528
4529 bnx2_write_phy(bp, 0x17, 0x0f01);
4530 bnx2_read_phy(bp, 0x15, &phy2);
4531 if (phy2 & 0x20) {
4532 u32 bmcr;
4533
Michael Chanca58c3a2007-05-03 13:22:52 -07004534 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004535 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004536 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004537
4538 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4539 }
4540 } else
4541 bp->current_interval = bp->timer_interval;
4542
4543 spin_unlock(&bp->phy_lock);
4544}
4545
4546static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004547bnx2_5708_serdes_timer(struct bnx2 *bp)
4548{
4549 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4550 bp->serdes_an_pending = 0;
4551 return;
4552 }
4553
4554 spin_lock(&bp->phy_lock);
4555 if (bp->serdes_an_pending)
4556 bp->serdes_an_pending--;
4557 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4558 u32 bmcr;
4559
Michael Chanca58c3a2007-05-03 13:22:52 -07004560 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004561 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004562 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004563 bp->current_interval = SERDES_FORCED_TIMEOUT;
4564 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004565 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004566 bp->serdes_an_pending = 2;
4567 bp->current_interval = bp->timer_interval;
4568 }
4569
4570 } else
4571 bp->current_interval = bp->timer_interval;
4572
4573 spin_unlock(&bp->phy_lock);
4574}
4575
4576static void
Michael Chanb6016b72005-05-26 13:03:09 -07004577bnx2_timer(unsigned long data)
4578{
4579 struct bnx2 *bp = (struct bnx2 *) data;
4580 u32 msg;
4581
Michael Chancd339a02005-08-25 15:35:24 -07004582 if (!netif_running(bp->dev))
4583 return;
4584
Michael Chanb6016b72005-05-26 13:03:09 -07004585 if (atomic_read(&bp->intr_sem) != 0)
4586 goto bnx2_restart_timer;
4587
4588 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004589 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004590
Michael Chancea94db2006-06-12 22:16:13 -07004591 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4592
Michael Chanf8dd0642006-11-19 14:08:29 -08004593 if (bp->phy_flags & PHY_SERDES_FLAG) {
4594 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4595 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004596 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004597 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004598 }
4599
4600bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004601 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004602}
4603
4604/* Called with rtnl_lock */
4605static int
4606bnx2_open(struct net_device *dev)
4607{
Michael Chan972ec0d2006-01-23 16:12:43 -08004608 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004609 int rc;
4610
Michael Chan1b2f9222007-05-03 13:20:19 -07004611 netif_carrier_off(dev);
4612
Pavel Machek829ca9a2005-09-03 15:56:56 -07004613 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004614 bnx2_disable_int(bp);
4615
4616 rc = bnx2_alloc_mem(bp);
4617 if (rc)
4618 return rc;
4619
4620 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4621 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4622 !disable_msi) {
4623
4624 if (pci_enable_msi(bp->pdev) == 0) {
4625 bp->flags |= USING_MSI_FLAG;
4626 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4627 dev);
4628 }
4629 else {
4630 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004631 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004632 }
4633 }
4634 else {
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004635 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
Michael Chanb6016b72005-05-26 13:03:09 -07004636 dev->name, dev);
4637 }
4638 if (rc) {
4639 bnx2_free_mem(bp);
4640 return rc;
4641 }
4642
4643 rc = bnx2_init_nic(bp);
4644
4645 if (rc) {
4646 free_irq(bp->pdev->irq, dev);
4647 if (bp->flags & USING_MSI_FLAG) {
4648 pci_disable_msi(bp->pdev);
4649 bp->flags &= ~USING_MSI_FLAG;
4650 }
4651 bnx2_free_skbs(bp);
4652 bnx2_free_mem(bp);
4653 return rc;
4654 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004655
Michael Chancd339a02005-08-25 15:35:24 -07004656 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004657
4658 atomic_set(&bp->intr_sem, 0);
4659
4660 bnx2_enable_int(bp);
4661
4662 if (bp->flags & USING_MSI_FLAG) {
4663		/* Test MSI to make sure it is working.
4664		 * If the MSI test fails, go back to INTx mode.
4665 */
4666 if (bnx2_test_intr(bp) != 0) {
4667 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4668 " using MSI, switching to INTx mode. Please"
4669 " report this failure to the PCI maintainer"
4670 " and include system chipset information.\n",
4671 bp->dev->name);
4672
4673 bnx2_disable_int(bp);
4674 free_irq(bp->pdev->irq, dev);
4675 pci_disable_msi(bp->pdev);
4676 bp->flags &= ~USING_MSI_FLAG;
4677
4678 rc = bnx2_init_nic(bp);
4679
4680 if (!rc) {
4681 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004682 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004683 }
4684 if (rc) {
4685 bnx2_free_skbs(bp);
4686 bnx2_free_mem(bp);
4687 del_timer_sync(&bp->timer);
4688 return rc;
4689 }
4690 bnx2_enable_int(bp);
4691 }
4692 }
4693 if (bp->flags & USING_MSI_FLAG) {
4694 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4695 }
4696
4697 netif_start_queue(dev);
4698
4699 return 0;
4700}
4701
4702static void
David Howellsc4028952006-11-22 14:57:56 +00004703bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004704{
David Howellsc4028952006-11-22 14:57:56 +00004705 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004706
Michael Chanafdc08b2005-08-25 15:34:29 -07004707 if (!netif_running(bp->dev))
4708 return;
4709
4710 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004711 bnx2_netif_stop(bp);
4712
4713 bnx2_init_nic(bp);
4714
4715 atomic_set(&bp->intr_sem, 1);
4716 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004717 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004718}
4719
4720static void
4721bnx2_tx_timeout(struct net_device *dev)
4722{
Michael Chan972ec0d2006-01-23 16:12:43 -08004723 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004724
4725 /* This allows the netif to be shutdown gracefully before resetting */
4726 schedule_work(&bp->reset_task);
4727}
4728
4729#ifdef BCM_VLAN
4730/* Called with rtnl_lock */
4731static void
4732bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4733{
Michael Chan972ec0d2006-01-23 16:12:43 -08004734 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004735
4736 bnx2_netif_stop(bp);
4737
4738 bp->vlgrp = vlgrp;
4739 bnx2_set_rx_mode(dev);
4740
4741 bnx2_netif_start(bp);
4742}
4743
4744/* Called with rtnl_lock */
4745static void
4746bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4747{
Michael Chan972ec0d2006-01-23 16:12:43 -08004748 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004749
4750 bnx2_netif_stop(bp);
Dan Aloni5c15bde2007-03-02 20:44:51 -08004751 vlan_group_set_device(bp->vlgrp, vid, NULL);
Michael Chanb6016b72005-05-26 13:03:09 -07004752 bnx2_set_rx_mode(dev);
4753
4754 bnx2_netif_start(bp);
4755}
4756#endif
4757
Herbert Xu932ff272006-06-09 12:20:56 -07004758/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004759 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4760 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004761 */
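/* Editor's note (not part of the original source): the transmit path
 * below maps the linear part and each page fragment, fills one TX BD
 * per mapping with the VLAN/checksum/LSO flags (including the IPv6
 * TSO encoding via the TCP6_OFF/SW_FLAGS bits), and then publishes
 * the new producer index and byte sequence through the TX mailbox.
 * The queue is stopped when fewer than MAX_SKB_FRAGS + 1 descriptors
 * remain and re-woken once tx_wake_thresh descriptors free up.
 */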
4762static int
4763bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4764{
Michael Chan972ec0d2006-01-23 16:12:43 -08004765 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004766 dma_addr_t mapping;
4767 struct tx_bd *txbd;
4768 struct sw_bd *tx_buf;
4769 u32 len, vlan_tag_flags, last_frag, mss;
4770 u16 prod, ring_prod;
4771 int i;
4772
Michael Chane89bbf12005-08-25 15:36:58 -07004773 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004774 netif_stop_queue(dev);
4775 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4776 dev->name);
4777
4778 return NETDEV_TX_BUSY;
4779 }
4780 len = skb_headlen(skb);
4781 prod = bp->tx_prod;
4782 ring_prod = TX_RING_IDX(prod);
4783
4784 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004786 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4787 }
4788
4789 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4790 vlan_tag_flags |=
4791 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4792 }
Herbert Xu79671682006-06-22 02:40:14 -07004793 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004794 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4795 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004796 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004797
Michael Chanb6016b72005-05-26 13:03:09 -07004798 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4799
Michael Chan4666f872007-05-03 13:22:28 -07004800 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004801
Michael Chan4666f872007-05-03 13:22:28 -07004802 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4803 u32 tcp_off = skb_transport_offset(skb) -
4804 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004805
Michael Chan4666f872007-05-03 13:22:28 -07004806 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4807 TX_BD_FLAGS_SW_FLAGS;
4808 if (likely(tcp_off == 0))
4809 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4810 else {
4811 tcp_off >>= 3;
4812 vlan_tag_flags |= ((tcp_off & 0x3) <<
4813 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4814 ((tcp_off & 0x10) <<
4815 TX_BD_FLAGS_TCP6_OFF4_SHL);
4816 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4817 }
4818 } else {
4819 if (skb_header_cloned(skb) &&
4820 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4821 dev_kfree_skb(skb);
4822 return NETDEV_TX_OK;
4823 }
4824
4825 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4826
4827 iph = ip_hdr(skb);
4828 iph->check = 0;
4829 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4830 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4831 iph->daddr, 0,
4832 IPPROTO_TCP,
4833 0);
4834 if (tcp_opt_len || (iph->ihl > 5)) {
4835 vlan_tag_flags |= ((iph->ihl - 5) +
4836 (tcp_opt_len >> 2)) << 8;
4837 }
Michael Chanb6016b72005-05-26 13:03:09 -07004838 }
Michael Chan4666f872007-05-03 13:22:28 -07004839 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004840 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004841
4842 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004843
Michael Chanb6016b72005-05-26 13:03:09 -07004844 tx_buf = &bp->tx_buf_ring[ring_prod];
4845 tx_buf->skb = skb;
4846 pci_unmap_addr_set(tx_buf, mapping, mapping);
4847
4848 txbd = &bp->tx_desc_ring[ring_prod];
4849
4850 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4851 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4852 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4853 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4854
4855 last_frag = skb_shinfo(skb)->nr_frags;
4856
4857 for (i = 0; i < last_frag; i++) {
4858 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4859
4860 prod = NEXT_TX_BD(prod);
4861 ring_prod = TX_RING_IDX(prod);
4862 txbd = &bp->tx_desc_ring[ring_prod];
4863
4864 len = frag->size;
4865 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4866 len, PCI_DMA_TODEVICE);
4867 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4868 mapping, mapping);
4869
4870 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4871 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4872 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4873 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4874
4875 }
4876 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4877
4878 prod = NEXT_TX_BD(prod);
4879 bp->tx_prod_bseq += skb->len;
4880
Michael Chan234754d2006-11-19 14:11:41 -08004881 REG_WR16(bp, bp->tx_bidx_addr, prod);
4882 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004883
4884 mmiowb();
4885
4886 bp->tx_prod = prod;
4887 dev->trans_start = jiffies;
4888
Michael Chane89bbf12005-08-25 15:36:58 -07004889 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004890 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004891 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004892 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004893 }
4894
4895 return NETDEV_TX_OK;
4896}
4897
4898/* Called with rtnl_lock */
4899static int
4900bnx2_close(struct net_device *dev)
4901{
Michael Chan972ec0d2006-01-23 16:12:43 -08004902 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004903 u32 reset_code;
4904
Michael Chanafdc08b2005-08-25 15:34:29 -07004905 /* Calling flush_scheduled_work() may deadlock because
4906 * linkwatch_event() may be on the workqueue and it will try to get
4907	 * the rtnl_lock, which we are holding.
4908 */
4909 while (bp->in_reset_task)
4910 msleep(1);
4911
Michael Chanb6016b72005-05-26 13:03:09 -07004912 bnx2_netif_stop(bp);
4913 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004914 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004915 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004916 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004917 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4918 else
4919 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4920 bnx2_reset_chip(bp, reset_code);
4921 free_irq(bp->pdev->irq, dev);
4922 if (bp->flags & USING_MSI_FLAG) {
4923 pci_disable_msi(bp->pdev);
4924 bp->flags &= ~USING_MSI_FLAG;
4925 }
4926 bnx2_free_skbs(bp);
4927 bnx2_free_mem(bp);
4928 bp->link_up = 0;
4929 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004930 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004931 return 0;
4932}
4933
4934#define GET_NET_STATS64(ctr) \
4935 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4936 (unsigned long) (ctr##_lo)
4937
4938#define GET_NET_STATS32(ctr) \
4939 (ctr##_lo)
4940
4941#if (BITS_PER_LONG == 64)
4942#define GET_NET_STATS GET_NET_STATS64
4943#else
4944#define GET_NET_STATS GET_NET_STATS32
4945#endif
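/* Editor's note (not part of the original source): the GET_NET_STATS
 * macros above splice the hardware counters into unsigned long netdev
 * stats -- combining the _hi/_lo halves on 64-bit kernels and keeping
 * only the low 32 bits on 32-bit kernels.
 */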
4946
4947static struct net_device_stats *
4948bnx2_get_stats(struct net_device *dev)
4949{
Michael Chan972ec0d2006-01-23 16:12:43 -08004950 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004951 struct statistics_block *stats_blk = bp->stats_blk;
4952 struct net_device_stats *net_stats = &bp->net_stats;
4953
4954 if (bp->stats_blk == NULL) {
4955 return net_stats;
4956 }
4957 net_stats->rx_packets =
4958 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4959 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4960 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4961
4962 net_stats->tx_packets =
4963 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4964 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4965 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4966
4967 net_stats->rx_bytes =
4968 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4969
4970 net_stats->tx_bytes =
4971 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4972
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004973 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004974 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4975
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004976 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004977 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4978
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004979 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004980 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4981 stats_blk->stat_EtherStatsOverrsizePkts);
4982
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004983 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004984 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4985
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004986 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004987 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4988
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004989 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004990 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4991
4992 net_stats->rx_errors = net_stats->rx_length_errors +
4993 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4994 net_stats->rx_crc_errors;
4995
4996 net_stats->tx_aborted_errors =
4997 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4998 stats_blk->stat_Dot3StatsLateCollisions);
4999
Michael Chan5b0c76a2005-11-04 08:45:49 -08005000 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5001 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005002 net_stats->tx_carrier_errors = 0;
5003 else {
5004 net_stats->tx_carrier_errors =
5005 (unsigned long)
5006 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5007 }
5008
5009 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005010 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005011 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5012 +
5013 net_stats->tx_aborted_errors +
5014 net_stats->tx_carrier_errors;
5015
Michael Chancea94db2006-06-12 22:16:13 -07005016 net_stats->rx_missed_errors =
5017 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5018 stats_blk->stat_FwRxDrop);
5019
Michael Chanb6016b72005-05-26 13:03:09 -07005020 return net_stats;
5021}
5022
5023/* All ethtool functions called with rtnl_lock */
5024
5025static int
5026bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5027{
Michael Chan972ec0d2006-01-23 16:12:43 -08005028 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005029
5030 cmd->supported = SUPPORTED_Autoneg;
5031 if (bp->phy_flags & PHY_SERDES_FLAG) {
5032 cmd->supported |= SUPPORTED_1000baseT_Full |
5033 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005034 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5035 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005036
5037 cmd->port = PORT_FIBRE;
5038 }
5039 else {
5040 cmd->supported |= SUPPORTED_10baseT_Half |
5041 SUPPORTED_10baseT_Full |
5042 SUPPORTED_100baseT_Half |
5043 SUPPORTED_100baseT_Full |
5044 SUPPORTED_1000baseT_Full |
5045 SUPPORTED_TP;
5046
5047 cmd->port = PORT_TP;
5048 }
5049
5050 cmd->advertising = bp->advertising;
5051
5052 if (bp->autoneg & AUTONEG_SPEED) {
5053 cmd->autoneg = AUTONEG_ENABLE;
5054 }
5055 else {
5056 cmd->autoneg = AUTONEG_DISABLE;
5057 }
5058
5059 if (netif_carrier_ok(dev)) {
5060 cmd->speed = bp->line_speed;
5061 cmd->duplex = bp->duplex;
5062 }
5063 else {
5064 cmd->speed = -1;
5065 cmd->duplex = -1;
5066 }
5067
5068 cmd->transceiver = XCVR_INTERNAL;
5069 cmd->phy_address = bp->phy_addr;
5070
5071 return 0;
5072}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005073
Michael Chanb6016b72005-05-26 13:03:09 -07005074static int
5075bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5076{
Michael Chan972ec0d2006-01-23 16:12:43 -08005077 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005078 u8 autoneg = bp->autoneg;
5079 u8 req_duplex = bp->req_duplex;
5080 u16 req_line_speed = bp->req_line_speed;
5081 u32 advertising = bp->advertising;
5082
5083 if (cmd->autoneg == AUTONEG_ENABLE) {
5084 autoneg |= AUTONEG_SPEED;
5085
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005086 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005087
5088		/* allow advertising a single speed */
5089 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5090 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5091 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5092 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5093
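			/* A mask naming exactly one 10/100 speed is only
			 * honored on copper PHYs, e.g.
			 * "ethtool -s ethX autoneg on advertise 0x008"
			 * (100baseT-Full).
			 */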
5094 if (bp->phy_flags & PHY_SERDES_FLAG)
5095 return -EINVAL;
5096
5097 advertising = cmd->advertising;
5098
Michael Chan27a005b2007-05-03 13:23:41 -07005099 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5100 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5101 return -EINVAL;
5102 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005103 advertising = cmd->advertising;
5104 }
5105 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5106 return -EINVAL;
5107 }
5108 else {
5109 if (bp->phy_flags & PHY_SERDES_FLAG) {
5110 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5111 }
5112 else {
5113 advertising = ETHTOOL_ALL_COPPER_SPEED;
5114 }
5115 }
5116 advertising |= ADVERTISED_Autoneg;
5117 }
5118 else {
5119 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005120 if ((cmd->speed != SPEED_1000 &&
5121 cmd->speed != SPEED_2500) ||
5122 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005123 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005124
5125 if (cmd->speed == SPEED_2500 &&
5126 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5127 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005128 }
5129 else if (cmd->speed == SPEED_1000) {
5130 return -EINVAL;
5131 }
5132 autoneg &= ~AUTONEG_SPEED;
5133 req_line_speed = cmd->speed;
5134 req_duplex = cmd->duplex;
5135 advertising = 0;
5136 }
5137
5138 bp->autoneg = autoneg;
5139 bp->advertising = advertising;
5140 bp->req_line_speed = req_line_speed;
5141 bp->req_duplex = req_duplex;
5142
Michael Chanc770a652005-08-25 15:38:39 -07005143 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005144
5145 bnx2_setup_phy(bp);
5146
Michael Chanc770a652005-08-25 15:38:39 -07005147 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005148
5149 return 0;
5150}
5151
5152static void
5153bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5154{
Michael Chan972ec0d2006-01-23 16:12:43 -08005155 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005156
5157 strcpy(info->driver, DRV_MODULE_NAME);
5158 strcpy(info->version, DRV_MODULE_VERSION);
5159 strcpy(info->bus_info, pci_name(bp->pdev));
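	/* Each of the top three bytes of bp->fw_ver is rendered as a
	 * single decimal digit, so a bootcode revision of 0x01020300 is
	 * reported as "1.2.3".
	 */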
5160 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5161 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5162 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005163 info->fw_version[1] = info->fw_version[3] = '.';
5164 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005165}
5166
Michael Chan244ac4f2006-03-20 17:48:46 -08005167#define BNX2_REGDUMP_LEN (32 * 1024)
5168
5169static int
5170bnx2_get_regs_len(struct net_device *dev)
5171{
5172 return BNX2_REGDUMP_LEN;
5173}
5174
5175static void
5176bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5177{
5178 u32 *p = _p, i, offset;
5179 u8 *orig_p = _p;
5180 struct bnx2 *bp = netdev_priv(dev);
5181 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5182 0x0800, 0x0880, 0x0c00, 0x0c10,
5183 0x0c30, 0x0d08, 0x1000, 0x101c,
5184 0x1040, 0x1048, 0x1080, 0x10a4,
5185 0x1400, 0x1490, 0x1498, 0x14f0,
5186 0x1500, 0x155c, 0x1580, 0x15dc,
5187 0x1600, 0x1658, 0x1680, 0x16d8,
5188 0x1800, 0x1820, 0x1840, 0x1854,
5189 0x1880, 0x1894, 0x1900, 0x1984,
5190 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5191 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5192 0x2000, 0x2030, 0x23c0, 0x2400,
5193 0x2800, 0x2820, 0x2830, 0x2850,
5194 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5195 0x3c00, 0x3c94, 0x4000, 0x4010,
5196 0x4080, 0x4090, 0x43c0, 0x4458,
5197 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5198 0x4fc0, 0x5010, 0x53c0, 0x5444,
5199 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5200 0x5fc0, 0x6000, 0x6400, 0x6428,
5201 0x6800, 0x6848, 0x684c, 0x6860,
5202 0x6888, 0x6910, 0x8000 };
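	/* reg_boundaries[] is a flat list of [start, end) pairs; registers
	 * inside a pair are read below, the gaps between pairs are left
	 * zeroed in the dump, and the final 0x8000 (== BNX2_REGDUMP_LEN)
	 * terminates the walk.
	 */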
5203
5204 regs->version = 0;
5205
5206 memset(p, 0, BNX2_REGDUMP_LEN);
5207
5208 if (!netif_running(bp->dev))
5209 return;
5210
5211 i = 0;
5212 offset = reg_boundaries[0];
5213 p += offset;
5214 while (offset < BNX2_REGDUMP_LEN) {
5215 *p++ = REG_RD(bp, offset);
5216 offset += 4;
5217 if (offset == reg_boundaries[i + 1]) {
5218 offset = reg_boundaries[i + 2];
5219 p = (u32 *) (orig_p + offset);
5220 i += 2;
5221 }
5222 }
5223}
5224
Michael Chanb6016b72005-05-26 13:03:09 -07005225static void
5226bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5227{
Michael Chan972ec0d2006-01-23 16:12:43 -08005228 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005229
5230 if (bp->flags & NO_WOL_FLAG) {
5231 wol->supported = 0;
5232 wol->wolopts = 0;
5233 }
5234 else {
5235 wol->supported = WAKE_MAGIC;
5236 if (bp->wol)
5237 wol->wolopts = WAKE_MAGIC;
5238 else
5239 wol->wolopts = 0;
5240 }
5241 memset(&wol->sopass, 0, sizeof(wol->sopass));
5242}
5243
5244static int
5245bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5246{
Michael Chan972ec0d2006-01-23 16:12:43 -08005247 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005248
5249 if (wol->wolopts & ~WAKE_MAGIC)
5250 return -EINVAL;
5251
5252 if (wol->wolopts & WAKE_MAGIC) {
5253 if (bp->flags & NO_WOL_FLAG)
5254 return -EINVAL;
5255
5256 bp->wol = 1;
5257 }
5258 else {
5259 bp->wol = 0;
5260 }
5261 return 0;
5262}
5263
5264static int
5265bnx2_nway_reset(struct net_device *dev)
5266{
Michael Chan972ec0d2006-01-23 16:12:43 -08005267 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005268 u32 bmcr;
5269
5270 if (!(bp->autoneg & AUTONEG_SPEED)) {
5271 return -EINVAL;
5272 }
5273
Michael Chanc770a652005-08-25 15:38:39 -07005274 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005275
5276 /* Force a link down visible on the other side */
5277 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005278 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005279 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005280
5281 msleep(20);
5282
Michael Chanc770a652005-08-25 15:38:39 -07005283 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005284
5285 bp->current_interval = SERDES_AN_TIMEOUT;
5286 bp->serdes_an_pending = 1;
5287 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005288 }
5289
Michael Chanca58c3a2007-05-03 13:22:52 -07005290 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005291 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005292 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005293
Michael Chanc770a652005-08-25 15:38:39 -07005294 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005295
5296 return 0;
5297}
5298
5299static int
5300bnx2_get_eeprom_len(struct net_device *dev)
5301{
Michael Chan972ec0d2006-01-23 16:12:43 -08005302 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005303
Michael Chan1122db72006-01-23 16:11:42 -08005304 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005305 return 0;
5306
Michael Chan1122db72006-01-23 16:11:42 -08005307 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005308}
5309
5310static int
5311bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5312 u8 *eebuf)
5313{
Michael Chan972ec0d2006-01-23 16:12:43 -08005314 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005315 int rc;
5316
John W. Linville1064e942005-11-10 12:58:24 -08005317 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005318
5319 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5320
5321 return rc;
5322}
5323
5324static int
5325bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5326 u8 *eebuf)
5327{
Michael Chan972ec0d2006-01-23 16:12:43 -08005328 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005329 int rc;
5330
John W. Linville1064e942005-11-10 12:58:24 -08005331 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005332
5333 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5334
5335 return rc;
5336}
5337
5338static int
5339bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5340{
Michael Chan972ec0d2006-01-23 16:12:43 -08005341 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005342
5343 memset(coal, 0, sizeof(struct ethtool_coalesce));
5344
5345 coal->rx_coalesce_usecs = bp->rx_ticks;
5346 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5347 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5348 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5349
5350 coal->tx_coalesce_usecs = bp->tx_ticks;
5351 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5352 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5353 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5354
5355 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5356
5357 return 0;
5358}
5359
5360static int
5361bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5362{
Michael Chan972ec0d2006-01-23 16:12:43 -08005363 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005364
5365 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5366 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5367
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005368 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005369 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5370
5371 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5372 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5373
5374 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5375 if (bp->rx_quick_cons_trip_int > 0xff)
5376 bp->rx_quick_cons_trip_int = 0xff;
5377
5378 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5379 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5380
5381 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5382 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5383
5384 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5385 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5386
5387 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5388 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5389 0xff;
5390
5391 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5392 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5393 bp->stats_ticks &= 0xffff00;
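	/* The hardware fields are narrow: the usec tick values are clamped
	 * to 10 bits (0x3ff), the frame-count trip values to 8 bits (0xff),
	 * and the statistics interval to a multiple of 256 usec capped at
	 * 0xffff00.
	 */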
5394
5395 if (netif_running(bp->dev)) {
5396 bnx2_netif_stop(bp);
5397 bnx2_init_nic(bp);
5398 bnx2_netif_start(bp);
5399 }
5400
5401 return 0;
5402}
5403
5404static void
5405bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5406{
Michael Chan972ec0d2006-01-23 16:12:43 -08005407 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005408
Michael Chan13daffa2006-03-20 17:49:20 -08005409 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005410 ering->rx_mini_max_pending = 0;
5411 ering->rx_jumbo_max_pending = 0;
5412
5413 ering->rx_pending = bp->rx_ring_size;
5414 ering->rx_mini_pending = 0;
5415 ering->rx_jumbo_pending = 0;
5416
5417 ering->tx_max_pending = MAX_TX_DESC_CNT;
5418 ering->tx_pending = bp->tx_ring_size;
5419}
5420
5421static int
5422bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5423{
Michael Chan972ec0d2006-01-23 16:12:43 -08005424 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005425
Michael Chan13daffa2006-03-20 17:49:20 -08005426 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005427 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5428 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5429
5430 return -EINVAL;
5431 }
Michael Chan13daffa2006-03-20 17:49:20 -08005432 if (netif_running(bp->dev)) {
5433 bnx2_netif_stop(bp);
5434 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5435 bnx2_free_skbs(bp);
5436 bnx2_free_mem(bp);
5437 }
5438
5439 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005440 bp->tx_ring_size = ering->tx_pending;
5441
5442 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005443 int rc;
5444
5445 rc = bnx2_alloc_mem(bp);
5446 if (rc)
5447 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005448 bnx2_init_nic(bp);
5449 bnx2_netif_start(bp);
5450 }
5451
5452 return 0;
5453}
5454
5455static void
5456bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5457{
Michael Chan972ec0d2006-01-23 16:12:43 -08005458 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005459
5460 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5461 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5462 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5463}
5464
5465static int
5466bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5467{
Michael Chan972ec0d2006-01-23 16:12:43 -08005468 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005469
5470 bp->req_flow_ctrl = 0;
5471 if (epause->rx_pause)
5472 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5473 if (epause->tx_pause)
5474 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5475
5476 if (epause->autoneg) {
5477 bp->autoneg |= AUTONEG_FLOW_CTRL;
5478 }
5479 else {
5480 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5481 }
5482
Michael Chanc770a652005-08-25 15:38:39 -07005483 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005484
5485 bnx2_setup_phy(bp);
5486
Michael Chanc770a652005-08-25 15:38:39 -07005487 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005488
5489 return 0;
5490}
5491
5492static u32
5493bnx2_get_rx_csum(struct net_device *dev)
5494{
Michael Chan972ec0d2006-01-23 16:12:43 -08005495 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005496
5497 return bp->rx_csum;
5498}
5499
5500static int
5501bnx2_set_rx_csum(struct net_device *dev, u32 data)
5502{
Michael Chan972ec0d2006-01-23 16:12:43 -08005503 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005504
5505 bp->rx_csum = data;
5506 return 0;
5507}
5508
Michael Chanb11d6212006-06-29 12:31:21 -07005509static int
5510bnx2_set_tso(struct net_device *dev, u32 data)
5511{
Michael Chan4666f872007-05-03 13:22:28 -07005512 struct bnx2 *bp = netdev_priv(dev);
5513
5514 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005515 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005516 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5517 dev->features |= NETIF_F_TSO6;
5518 } else
5519 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5520 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005521 return 0;
5522}
5523
Michael Chancea94db2006-06-12 22:16:13 -07005524#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005525
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005526static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005527 char string[ETH_GSTRING_LEN];
5528} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5529 { "rx_bytes" },
5530 { "rx_error_bytes" },
5531 { "tx_bytes" },
5532 { "tx_error_bytes" },
5533 { "rx_ucast_packets" },
5534 { "rx_mcast_packets" },
5535 { "rx_bcast_packets" },
5536 { "tx_ucast_packets" },
5537 { "tx_mcast_packets" },
5538 { "tx_bcast_packets" },
5539 { "tx_mac_errors" },
5540 { "tx_carrier_errors" },
5541 { "rx_crc_errors" },
5542 { "rx_align_errors" },
5543 { "tx_single_collisions" },
5544 { "tx_multi_collisions" },
5545 { "tx_deferred" },
5546 { "tx_excess_collisions" },
5547 { "tx_late_collisions" },
5548 { "tx_total_collisions" },
5549 { "rx_fragments" },
5550 { "rx_jabbers" },
5551 { "rx_undersize_packets" },
5552 { "rx_oversize_packets" },
5553 { "rx_64_byte_packets" },
5554 { "rx_65_to_127_byte_packets" },
5555 { "rx_128_to_255_byte_packets" },
5556 { "rx_256_to_511_byte_packets" },
5557 { "rx_512_to_1023_byte_packets" },
5558 { "rx_1024_to_1522_byte_packets" },
5559 { "rx_1523_to_9022_byte_packets" },
5560 { "tx_64_byte_packets" },
5561 { "tx_65_to_127_byte_packets" },
5562 { "tx_128_to_255_byte_packets" },
5563 { "tx_256_to_511_byte_packets" },
5564 { "tx_512_to_1023_byte_packets" },
5565 { "tx_1024_to_1522_byte_packets" },
5566 { "tx_1523_to_9022_byte_packets" },
5567 { "rx_xon_frames" },
5568 { "rx_xoff_frames" },
5569 { "tx_xon_frames" },
5570 { "tx_xoff_frames" },
5571 { "rx_mac_ctrl_frames" },
5572 { "rx_filtered_packets" },
5573 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005574 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005575};
5576
5577#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5578
Arjan van de Venf71e1302006-03-03 21:33:57 -05005579static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005580 STATS_OFFSET32(stat_IfHCInOctets_hi),
5581 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5582 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5583 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5584 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5585 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5586 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5587 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5588 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5589 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5590 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005591 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5592 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5593 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5594 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5595 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5596 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5597 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5598 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5599 STATS_OFFSET32(stat_EtherStatsCollisions),
5600 STATS_OFFSET32(stat_EtherStatsFragments),
5601 STATS_OFFSET32(stat_EtherStatsJabbers),
5602 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5603 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5604 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5605 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5606 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5607 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5608 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5609 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5610 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5611 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5612 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5613 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5614 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5615 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5616 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5617 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5618 STATS_OFFSET32(stat_XonPauseFramesReceived),
5619 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5620 STATS_OFFSET32(stat_OutXonSent),
5621 STATS_OFFSET32(stat_OutXoffSent),
5622 STATS_OFFSET32(stat_MacControlFramesReceived),
5623 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5624 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005625 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005626};
5627
5628/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5629 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005630 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005631static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005632 8,0,8,8,8,8,8,8,8,8,
5633 4,0,4,4,4,4,4,4,4,4,
5634 4,4,4,4,4,4,4,4,4,4,
5635 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005636 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005637};
5638
Michael Chan5b0c76a2005-11-04 08:45:49 -08005639static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5640 8,0,8,8,8,8,8,8,8,8,
5641 4,4,4,4,4,4,4,4,4,4,
5642 4,4,4,4,4,4,4,4,4,4,
5643 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005644 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005645};
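/* Each entry gives the width of the counter at the same index in
 * bnx2_stats_offset_arr: 8 for a 64-bit _hi/_lo pair, 4 for a 32-bit
 * counter, and 0 for a counter that must be skipped on that chip.
 */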
5646
Michael Chanb6016b72005-05-26 13:03:09 -07005647#define BNX2_NUM_TESTS 6
5648
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005649static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005650 char string[ETH_GSTRING_LEN];
5651} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5652 { "register_test (offline)" },
5653 { "memory_test (offline)" },
5654 { "loopback_test (offline)" },
5655 { "nvram_test (online)" },
5656 { "interrupt_test (online)" },
5657 { "link_test (online)" },
5658};
5659
5660static int
5661bnx2_self_test_count(struct net_device *dev)
5662{
5663 return BNX2_NUM_TESTS;
5664}
5665
5666static void
5667bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5668{
Michael Chan972ec0d2006-01-23 16:12:43 -08005669 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005670
5671 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5672 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005673 int i;
5674
Michael Chanb6016b72005-05-26 13:03:09 -07005675 bnx2_netif_stop(bp);
5676 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5677 bnx2_free_skbs(bp);
5678
5679 if (bnx2_test_registers(bp) != 0) {
5680 buf[0] = 1;
5681 etest->flags |= ETH_TEST_FL_FAILED;
5682 }
5683 if (bnx2_test_memory(bp) != 0) {
5684 buf[1] = 1;
5685 etest->flags |= ETH_TEST_FL_FAILED;
5686 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005687 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005688 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005689
5690 if (!netif_running(bp->dev)) {
5691 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5692 }
5693 else {
5694 bnx2_init_nic(bp);
5695 bnx2_netif_start(bp);
5696 }
5697
5698 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005699 for (i = 0; i < 7; i++) {
5700 if (bp->link_up)
5701 break;
5702 msleep_interruptible(1000);
5703 }
Michael Chanb6016b72005-05-26 13:03:09 -07005704 }
5705
5706 if (bnx2_test_nvram(bp) != 0) {
5707 buf[3] = 1;
5708 etest->flags |= ETH_TEST_FL_FAILED;
5709 }
5710 if (bnx2_test_intr(bp) != 0) {
5711 buf[4] = 1;
5712 etest->flags |= ETH_TEST_FL_FAILED;
5713 }
5714
5715 if (bnx2_test_link(bp) != 0) {
5716 buf[5] = 1;
5717 etest->flags |= ETH_TEST_FL_FAILED;
5718
5719 }
5720}
5721
5722static void
5723bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5724{
5725 switch (stringset) {
5726 case ETH_SS_STATS:
5727 memcpy(buf, bnx2_stats_str_arr,
5728 sizeof(bnx2_stats_str_arr));
5729 break;
5730 case ETH_SS_TEST:
5731 memcpy(buf, bnx2_tests_str_arr,
5732 sizeof(bnx2_tests_str_arr));
5733 break;
5734 }
5735}
5736
5737static int
5738bnx2_get_stats_count(struct net_device *dev)
5739{
5740 return BNX2_NUM_STATS;
5741}
5742
5743static void
5744bnx2_get_ethtool_stats(struct net_device *dev,
5745 struct ethtool_stats *stats, u64 *buf)
5746{
Michael Chan972ec0d2006-01-23 16:12:43 -08005747 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005748 int i;
5749 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005750 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005751
5752 if (hw_stats == NULL) {
5753 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5754 return;
5755 }
5756
Michael Chan5b0c76a2005-11-04 08:45:49 -08005757 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5758 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5759 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5760 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005761 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005762 else
5763 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005764
5765 for (i = 0; i < BNX2_NUM_STATS; i++) {
5766 if (stats_len_arr[i] == 0) {
5767 /* skip this counter */
5768 buf[i] = 0;
5769 continue;
5770 }
5771 if (stats_len_arr[i] == 4) {
5772 /* 4-byte counter */
5773 buf[i] = (u64)
5774 *(hw_stats + bnx2_stats_offset_arr[i]);
5775 continue;
5776 }
5777 /* 8-byte counter */
5778 buf[i] = (((u64) *(hw_stats +
5779 bnx2_stats_offset_arr[i])) << 32) +
5780 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5781 }
5782}
5783
5784static int
5785bnx2_phys_id(struct net_device *dev, u32 data)
5786{
Michael Chan972ec0d2006-01-23 16:12:43 -08005787 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005788 int i;
5789 u32 save;
5790
5791 if (data == 0)
5792 data = 2;
5793
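	/* Each pass through the loop below toggles the LED and sleeps
	 * 500 ms, so the port blinks for roughly 'data' seconds;
	 * e.g. "ethtool -p ethX 5" identifies the port for about five
	 * seconds.
	 */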
5794 save = REG_RD(bp, BNX2_MISC_CFG);
5795 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5796
5797 for (i = 0; i < (data * 2); i++) {
5798 if ((i % 2) == 0) {
5799 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5800 }
5801 else {
5802 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5803 BNX2_EMAC_LED_1000MB_OVERRIDE |
5804 BNX2_EMAC_LED_100MB_OVERRIDE |
5805 BNX2_EMAC_LED_10MB_OVERRIDE |
5806 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5807 BNX2_EMAC_LED_TRAFFIC);
5808 }
5809 msleep_interruptible(500);
5810 if (signal_pending(current))
5811 break;
5812 }
5813 REG_WR(bp, BNX2_EMAC_LED, 0);
5814 REG_WR(bp, BNX2_MISC_CFG, save);
5815 return 0;
5816}
5817
Michael Chan4666f872007-05-03 13:22:28 -07005818static int
5819bnx2_set_tx_csum(struct net_device *dev, u32 data)
5820{
5821 struct bnx2 *bp = netdev_priv(dev);
5822
5823 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5824 return (ethtool_op_set_tx_hw_csum(dev, data));
5825 else
5826 return (ethtool_op_set_tx_csum(dev, data));
5827}
5828
Jeff Garzik7282d492006-09-13 14:30:00 -04005829static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005830 .get_settings = bnx2_get_settings,
5831 .set_settings = bnx2_set_settings,
5832 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005833 .get_regs_len = bnx2_get_regs_len,
5834 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005835 .get_wol = bnx2_get_wol,
5836 .set_wol = bnx2_set_wol,
5837 .nway_reset = bnx2_nway_reset,
5838 .get_link = ethtool_op_get_link,
5839 .get_eeprom_len = bnx2_get_eeprom_len,
5840 .get_eeprom = bnx2_get_eeprom,
5841 .set_eeprom = bnx2_set_eeprom,
5842 .get_coalesce = bnx2_get_coalesce,
5843 .set_coalesce = bnx2_set_coalesce,
5844 .get_ringparam = bnx2_get_ringparam,
5845 .set_ringparam = bnx2_set_ringparam,
5846 .get_pauseparam = bnx2_get_pauseparam,
5847 .set_pauseparam = bnx2_set_pauseparam,
5848 .get_rx_csum = bnx2_get_rx_csum,
5849 .set_rx_csum = bnx2_set_rx_csum,
5850 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005851 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005852 .get_sg = ethtool_op_get_sg,
5853 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005854 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005855 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005856 .self_test_count = bnx2_self_test_count,
5857 .self_test = bnx2_self_test,
5858 .get_strings = bnx2_get_strings,
5859 .phys_id = bnx2_phys_id,
5860 .get_stats_count = bnx2_get_stats_count,
5861 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005862 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005863};
5864
5865/* Called with rtnl_lock */
5866static int
5867bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5868{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005869 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005870 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005871 int err;
5872
5873 switch(cmd) {
5874 case SIOCGMIIPHY:
5875 data->phy_id = bp->phy_addr;
5876
5877 /* fallthru */
5878 case SIOCGMIIREG: {
5879 u32 mii_regval;
5880
Michael Chandad3e452007-05-03 13:18:03 -07005881 if (!netif_running(dev))
5882 return -EAGAIN;
5883
Michael Chanc770a652005-08-25 15:38:39 -07005884 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005885 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005886 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005887
5888 data->val_out = mii_regval;
5889
5890 return err;
5891 }
5892
5893 case SIOCSMIIREG:
5894 if (!capable(CAP_NET_ADMIN))
5895 return -EPERM;
5896
Michael Chandad3e452007-05-03 13:18:03 -07005897 if (!netif_running(dev))
5898 return -EAGAIN;
5899
Michael Chanc770a652005-08-25 15:38:39 -07005900 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005901 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005902 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005903
5904 return err;
5905
5906 default:
5907 /* do nothing */
5908 break;
5909 }
5910 return -EOPNOTSUPP;
5911}
5912
5913/* Called with rtnl_lock */
5914static int
5915bnx2_change_mac_addr(struct net_device *dev, void *p)
5916{
5917 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005918 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005919
Michael Chan73eef4c2005-08-25 15:39:15 -07005920 if (!is_valid_ether_addr(addr->sa_data))
5921 return -EINVAL;
5922
Michael Chanb6016b72005-05-26 13:03:09 -07005923 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5924 if (netif_running(dev))
5925 bnx2_set_mac_addr(bp);
5926
5927 return 0;
5928}
5929
5930/* Called with rtnl_lock */
5931static int
5932bnx2_change_mtu(struct net_device *dev, int new_mtu)
5933{
Michael Chan972ec0d2006-01-23 16:12:43 -08005934 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005935
5936 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5937 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5938 return -EINVAL;
5939
5940 dev->mtu = new_mtu;
5941 if (netif_running(dev)) {
5942 bnx2_netif_stop(bp);
5943
5944 bnx2_init_nic(bp);
5945
5946 bnx2_netif_start(bp);
5947 }
5948 return 0;
5949}
5950
5951#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5952static void
5953poll_bnx2(struct net_device *dev)
5954{
Michael Chan972ec0d2006-01-23 16:12:43 -08005955 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005956
5957 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005958 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005959 enable_irq(bp->pdev->irq);
5960}
5961#endif
5962
Michael Chan253c8b72007-01-08 19:56:01 -08005963static void __devinit
5964bnx2_get_5709_media(struct bnx2 *bp)
5965{
5966 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5967 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5968 u32 strap;
5969
5970 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5971 return;
5972 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5973 bp->phy_flags |= PHY_SERDES_FLAG;
5974 return;
5975 }
5976
5977 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5978 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5979 else
5980 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5981
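	/* The strap is decoded per PCI function: the values below mark
	 * the strap settings that select a SerDes (fiber) interface,
	 * everything else is treated as copper.
	 */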
5982 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5983 switch (strap) {
5984 case 0x4:
5985 case 0x5:
5986 case 0x6:
5987 bp->phy_flags |= PHY_SERDES_FLAG;
5988 return;
5989 }
5990 } else {
5991 switch (strap) {
5992 case 0x1:
5993 case 0x2:
5994 case 0x4:
5995 bp->phy_flags |= PHY_SERDES_FLAG;
5996 return;
5997 }
5998 }
5999}
6000
Michael Chanb6016b72005-05-26 13:03:09 -07006001static int __devinit
6002bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6003{
6004 struct bnx2 *bp;
6005 unsigned long mem_len;
6006 int rc;
6007 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006008 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006009
6010 SET_MODULE_OWNER(dev);
6011 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006012 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006013
6014 bp->flags = 0;
6015 bp->phy_flags = 0;
6016
6017 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6018 rc = pci_enable_device(pdev);
6019 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006020		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006021 goto err_out;
6022 }
6023
6024 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006025 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006026 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006027 rc = -ENODEV;
6028 goto err_out_disable;
6029 }
6030
6031 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6032 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006033 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006034 goto err_out_disable;
6035 }
6036
6037 pci_set_master(pdev);
6038
6039 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6040 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006041 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006042 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006043 rc = -EIO;
6044 goto err_out_release;
6045 }
6046
Michael Chanb6016b72005-05-26 13:03:09 -07006047 bp->dev = dev;
6048 bp->pdev = pdev;
6049
6050 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006051 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006052 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006053
6054 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006055 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006056 dev->mem_end = dev->mem_start + mem_len;
6057 dev->irq = pdev->irq;
6058
6059 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6060
6061 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006062 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006063 rc = -ENOMEM;
6064 goto err_out_release;
6065 }
6066
6067 /* Configure byte swap and enable write to the reg_window registers.
6068	 * Rely on the CPU to do target byte swapping on big endian systems;
6069	 * the chip's target access swapping will not swap all accesses.
6070 */
6071 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6072 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6073 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6074
Pavel Machek829ca9a2005-09-03 15:56:56 -07006075 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006076
6077 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6078
Michael Chan59b47d82006-11-19 14:10:45 -08006079 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6080 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6081 if (bp->pcix_cap == 0) {
6082 dev_err(&pdev->dev,
6083 "Cannot find PCIX capability, aborting.\n");
6084 rc = -EIO;
6085 goto err_out_unmap;
6086 }
6087 }
6088
Michael Chan40453c82007-05-03 13:19:18 -07006089 /* 5708 cannot support DMA addresses > 40-bit. */
6090 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6091 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6092 else
6093 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6094
6095 /* Configure DMA attributes. */
6096 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6097 dev->features |= NETIF_F_HIGHDMA;
6098 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6099 if (rc) {
6100 dev_err(&pdev->dev,
6101 "pci_set_consistent_dma_mask failed, aborting.\n");
6102 goto err_out_unmap;
6103 }
6104 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6105 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6106 goto err_out_unmap;
6107 }
6108
Michael Chanb6016b72005-05-26 13:03:09 -07006109 /* Get bus information. */
6110 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6111 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6112 u32 clkreg;
6113
6114 bp->flags |= PCIX_FLAG;
6115
6116 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006117
Michael Chanb6016b72005-05-26 13:03:09 -07006118 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6119 switch (clkreg) {
6120 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6121 bp->bus_speed_mhz = 133;
6122 break;
6123
6124 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6125 bp->bus_speed_mhz = 100;
6126 break;
6127
6128 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6129 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6130 bp->bus_speed_mhz = 66;
6131 break;
6132
6133 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6134 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6135 bp->bus_speed_mhz = 50;
6136 break;
6137
6138 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6139 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6140 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6141 bp->bus_speed_mhz = 33;
6142 break;
6143 }
6144 }
6145 else {
6146 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6147 bp->bus_speed_mhz = 66;
6148 else
6149 bp->bus_speed_mhz = 33;
6150 }
6151
6152 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6153 bp->flags |= PCI_32BIT_FLAG;
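	/* The bus type, width and speed detected above end up in the
	 * probe banner printed by bnx2_init_one(), e.g.
	 * "PCI-X 64-bit 133MHz".
	 */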
6154
6155 /* 5706A0 may falsely detect SERR and PERR. */
6156 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6157 reg = REG_RD(bp, PCI_COMMAND);
6158 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6159 REG_WR(bp, PCI_COMMAND, reg);
6160 }
6161 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6162 !(bp->flags & PCIX_FLAG)) {
6163
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006164 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006165 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006166 goto err_out_unmap;
6167 }
6168
6169 bnx2_init_nvram(bp);
6170
Michael Chane3648b32005-11-04 08:51:21 -08006171 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6172
6173 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006174 BNX2_SHM_HDR_SIGNATURE_SIG) {
6175 u32 off = PCI_FUNC(pdev->devfn) << 2;
6176
6177 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6178 } else
Michael Chane3648b32005-11-04 08:51:21 -08006179 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
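	/* Bootcode that publishes BNX2_SHM_HDR_SIGNATURE provides one
	 * shared memory window per PCI function (selected by 'off'
	 * above); older bootcode uses the single fixed
	 * HOST_VIEW_SHMEM_BASE window.
	 */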
6180
Michael Chanb6016b72005-05-26 13:03:09 -07006181 /* Get the permanent MAC address. First we need to make sure the
6182 * firmware is actually running.
6183 */
Michael Chane3648b32005-11-04 08:51:21 -08006184 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006185
6186 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6187 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006188 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006189 rc = -ENODEV;
6190 goto err_out_unmap;
6191 }
6192
Michael Chane3648b32005-11-04 08:51:21 -08006193 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006194
Michael Chane3648b32005-11-04 08:51:21 -08006195 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006196 bp->mac_addr[0] = (u8) (reg >> 8);
6197 bp->mac_addr[1] = (u8) reg;
6198
Michael Chane3648b32005-11-04 08:51:21 -08006199 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006200 bp->mac_addr[2] = (u8) (reg >> 24);
6201 bp->mac_addr[3] = (u8) (reg >> 16);
6202 bp->mac_addr[4] = (u8) (reg >> 8);
6203 bp->mac_addr[5] = (u8) reg;
6204
6205 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006206 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006207
6208 bp->rx_csum = 1;
6209
6210 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6211
6212 bp->tx_quick_cons_trip_int = 20;
6213 bp->tx_quick_cons_trip = 20;
6214 bp->tx_ticks_int = 80;
6215 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006216
Michael Chanb6016b72005-05-26 13:03:09 -07006217 bp->rx_quick_cons_trip_int = 6;
6218 bp->rx_quick_cons_trip = 6;
6219 bp->rx_ticks_int = 18;
6220 bp->rx_ticks = 18;
6221
6222 bp->stats_ticks = 1000000 & 0xffff00;
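	/* 1000000 & 0xffff00 == 999936, i.e. the statistics block is
	 * refreshed roughly once per second; the low 8 bits are kept
	 * clear, matching the restriction enforced in bnx2_set_coalesce().
	 */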
6223
6224 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006225 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006226
Michael Chan5b0c76a2005-11-04 08:45:49 -08006227 bp->phy_addr = 1;
6228
Michael Chanb6016b72005-05-26 13:03:09 -07006229 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6231 bnx2_get_5709_media(bp);
6232 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006233 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006234
6235 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006236 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006237 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006238 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006239 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006240 BNX2_SHARED_HW_CFG_CONFIG);
6241 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6242 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6243 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006244 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6245 CHIP_NUM(bp) == CHIP_NUM_5708)
6246 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006247 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6248 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006249
Michael Chan16088272006-06-12 22:16:43 -07006250 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6251 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6252 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006253 bp->flags |= NO_WOL_FLAG;
6254
Michael Chanb6016b72005-05-26 13:03:09 -07006255 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6256 bp->tx_quick_cons_trip_int =
6257 bp->tx_quick_cons_trip;
6258 bp->tx_ticks_int = bp->tx_ticks;
6259 bp->rx_quick_cons_trip_int =
6260 bp->rx_quick_cons_trip;
6261 bp->rx_ticks_int = bp->rx_ticks;
6262 bp->comp_prod_trip_int = bp->comp_prod_trip;
6263 bp->com_ticks_int = bp->com_ticks;
6264 bp->cmd_ticks_int = bp->cmd_ticks;
6265 }
6266
Michael Chanf9317a42006-09-29 17:06:23 -07006267 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6268 *
6269 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6270 * with byte enables disabled on the unused 32-bit word. This is legal
6271 * but causes problems on the AMD 8132 which will eventually stop
6272 * responding after a while.
6273 *
6274 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006275 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006276 */
6277 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6278 struct pci_dev *amd_8132 = NULL;
6279
6280 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6281 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6282 amd_8132))) {
6283 u8 rev;
6284
6285 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6286 if (rev >= 0x10 && rev <= 0x13) {
6287 disable_msi = 1;
6288 pci_dev_put(amd_8132);
6289 break;
6290 }
6291 }
6292 }
6293
Michael Chanb6016b72005-05-26 13:03:09 -07006294 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6295 bp->req_line_speed = 0;
6296 if (bp->phy_flags & PHY_SERDES_FLAG) {
6297 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006298
Michael Chane3648b32005-11-04 08:51:21 -08006299 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006300 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6301 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6302 bp->autoneg = 0;
6303 bp->req_line_speed = bp->line_speed = SPEED_1000;
6304 bp->req_duplex = DUPLEX_FULL;
6305 }
Michael Chanb6016b72005-05-26 13:03:09 -07006306 }
6307 else {
6308 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6309 }
6310
6311 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6312
Michael Chancd339a02005-08-25 15:35:24 -07006313 init_timer(&bp->timer);
6314 bp->timer.expires = RUN_AT(bp->timer_interval);
6315 bp->timer.data = (unsigned long) bp;
6316 bp->timer.function = bnx2_timer;
6317
Michael Chanb6016b72005-05-26 13:03:09 -07006318 return 0;
6319
6320err_out_unmap:
6321 if (bp->regview) {
6322 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006323 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006324 }
6325
6326err_out_release:
6327 pci_release_regions(pdev);
6328
6329err_out_disable:
6330 pci_disable_device(pdev);
6331 pci_set_drvdata(pdev, NULL);
6332
6333err_out:
6334 return rc;
6335}
6336
6337static int __devinit
6338bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6339{
6340 static int version_printed = 0;
6341 struct net_device *dev = NULL;
6342 struct bnx2 *bp;
6343 int rc, i;
6344
6345 if (version_printed++ == 0)
6346 printk(KERN_INFO "%s", version);
6347
6348 /* dev zeroed in init_etherdev */
6349 dev = alloc_etherdev(sizeof(*bp));
6350
6351 if (!dev)
6352 return -ENOMEM;
6353
6354 rc = bnx2_init_board(pdev, dev);
6355 if (rc < 0) {
6356 free_netdev(dev);
6357 return rc;
6358 }
6359
6360 dev->open = bnx2_open;
6361 dev->hard_start_xmit = bnx2_start_xmit;
6362 dev->stop = bnx2_close;
6363 dev->get_stats = bnx2_get_stats;
6364 dev->set_multicast_list = bnx2_set_rx_mode;
6365 dev->do_ioctl = bnx2_ioctl;
6366 dev->set_mac_address = bnx2_change_mac_addr;
6367 dev->change_mtu = bnx2_change_mtu;
6368 dev->tx_timeout = bnx2_tx_timeout;
6369 dev->watchdog_timeo = TX_TIMEOUT;
6370#ifdef BCM_VLAN
6371 dev->vlan_rx_register = bnx2_vlan_rx_register;
6372 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6373#endif
6374 dev->poll = bnx2_poll;
6375 dev->ethtool_ops = &bnx2_ethtool_ops;
6376 dev->weight = 64;
6377
Michael Chan972ec0d2006-01-23 16:12:43 -08006378 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006379
6380#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6381 dev->poll_controller = poll_bnx2;
6382#endif
6383
Michael Chan1b2f9222007-05-03 13:20:19 -07006384 pci_set_drvdata(pdev, dev);
6385
6386 memcpy(dev->dev_addr, bp->mac_addr, 6);
6387 memcpy(dev->perm_addr, bp->mac_addr, 6);
6388 bp->name = board_info[ent->driver_data].name;
6389
Michael Chan4666f872007-05-03 13:22:28 -07006390 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6391 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6392 else
6393 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan1b2f9222007-05-03 13:20:19 -07006394#ifdef BCM_VLAN
6395 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6396#endif
6397 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006398 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6399 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006400
Michael Chanb6016b72005-05-26 13:03:09 -07006401 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006402 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006403 if (bp->regview)
6404 iounmap(bp->regview);
6405 pci_release_regions(pdev);
6406 pci_disable_device(pdev);
6407 pci_set_drvdata(pdev, NULL);
6408 free_netdev(dev);
6409 return rc;
6410 }
6411
Michael Chanb6016b72005-05-26 13:03:09 -07006412 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6413 "IRQ %d, ",
6414 dev->name,
6415 bp->name,
6416 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6417 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6418 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6419 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6420 bp->bus_speed_mhz,
6421 dev->base_addr,
6422 bp->pdev->irq);
6423
6424 printk("node addr ");
6425 for (i = 0; i < 6; i++)
6426 printk("%2.2x", dev->dev_addr[i]);
6427 printk("\n");
6428
Michael Chanb6016b72005-05-26 13:03:09 -07006429 return 0;
6430}
6431
6432static void __devexit
6433bnx2_remove_one(struct pci_dev *pdev)
6434{
6435 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006436 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006437
Michael Chanafdc08b2005-08-25 15:34:29 -07006438 flush_scheduled_work();
6439
Michael Chanb6016b72005-05-26 13:03:09 -07006440 unregister_netdev(dev);
6441
6442 if (bp->regview)
6443 iounmap(bp->regview);
6444
6445 free_netdev(dev);
6446 pci_release_regions(pdev);
6447 pci_disable_device(pdev);
6448 pci_set_drvdata(pdev, NULL);
6449}
6450
6451static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006452bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006453{
6454 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006455 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006456 u32 reset_code;
6457
6458 if (!netif_running(dev))
6459 return 0;
6460
Michael Chan1d60290f2006-03-20 17:50:08 -08006461 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006462 bnx2_netif_stop(bp);
6463 netif_device_detach(dev);
6464 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006465 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006466 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006467 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006468 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6469 else
6470 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6471 bnx2_reset_chip(bp, reset_code);
6472 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006473 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006474 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006475 return 0;
6476}
6477
6478static int
6479bnx2_resume(struct pci_dev *pdev)
6480{
6481 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006482 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006483
6484 if (!netif_running(dev))
6485 return 0;
6486
Michael Chan30c517b2007-05-03 13:20:40 -07006487 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006488 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006489 netif_device_attach(dev);
6490 bnx2_init_nic(bp);
6491 bnx2_netif_start(bp);
6492 return 0;
6493}
6494
6495static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006496 .name = DRV_MODULE_NAME,
6497 .id_table = bnx2_pci_tbl,
6498 .probe = bnx2_init_one,
6499 .remove = __devexit_p(bnx2_remove_one),
6500 .suspend = bnx2_suspend,
6501 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006502};
6503
6504static int __init bnx2_init(void)
6505{
Jeff Garzik29917622006-08-19 17:48:59 -04006506 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006507}
6508
6509static void __exit bnx2_cleanup(void)
6510{
6511 pci_unregister_driver(&bnx2_pci_driver);
6512}
6513
6514module_init(bnx2_init);
6515module_exit(bnx2_cleanup);
6516
6517
6518