/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

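/* NVRAM parts supported by the chip.  Each entry appears to follow
 * struct flash_spec in bnx2.h: a strapping/ID value, four access-command
 * configuration words, a buffered-flash flag, the page geometry
 * (bits/size), a byte address mask, the total size, and a printable
 * name.  Entries named "Entry xxxx" are placeholders for strapping
 * combinations with no dedicated part.
 */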
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

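/* Return the number of free tx descriptors.  The smp_mb() is presumably
 * paired with barriers in the transmit and completion paths so that
 * tx_prod and tx_cons are read consistently here (an inference from the
 * ring accounting below).
 */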
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

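/* Indirect register access: the target address is written to the PCICFG
 * register window address register, and the data is then read or written
 * through the window.  indirect_lock serializes the two-step sequence
 * against other users of the window.
 */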
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

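/* Write one word of context memory.  On the 5709 the write goes through
 * the CTX_CTX_DATA/CTX_CTX_CTRL pair and the WRITE_REQ bit is polled
 * until the chip has accepted it; older chips take the address and data
 * directly through CTX_DATA_ADR/CTX_DATA.
 */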
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

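/* MII management access.  If the MAC is auto-polling the PHY, polling is
 * paused first, the read/write command is issued through
 * BNX2_EMAC_MDIO_COMM, and START_BUSY is polled until the transaction
 * completes (or -EBUSY after roughly 500us); auto-polling is then
 * restored.
 */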
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

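/* intr_sem acts as a nesting counter: bnx2_disable_int_sync() bumps it
 * and masks the interrupt, and bnx2_netif_start() only re-enables the
 * queue, polling and interrupts once the count drops back to zero.
 */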
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

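/* Resolve the pause (flow control) settings to use.  When both speed and
 * flow control are auto-negotiated, the result is derived from the local
 * and remote pause advertisements per Table 28B-3 of IEEE 802.3;
 * otherwise the requested settings are used (full duplex only).  On the
 * 5708 SerDes the resolved result is read back from the PHY instead.
 */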
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

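/* On the 5709 SerDes PHY the status register used for link polling
 * (mii_bmsr1) lives in the GP_STATUS block, so the block address has to
 * be switched before the read and restored afterwards; on other chips
 * these helpers do nothing.
 */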
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

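/* Translate the requested flow control settings into the pause bits to
 * advertise: the 1000Base-X pause bits for SerDes PHYs, or the standard
 * PAUSE_CAP/PAUSE_ASYM bits for copper PHYs.
 */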
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

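/* The 5709 SerDes PHY exposes its IEEE-style registers at offset +0x10
 * and groups the remaining registers behind MII_BNX2_BLK_ADDR block
 * selection, so the mii_* register offsets are remapped here before the
 * PHY is configured.
 */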
1449static int
Michael Chan27a005b2007-05-03 13:23:41 -07001450bnx2_init_5709s_phy(struct bnx2 *bp)
1451{
1452 u32 val;
1453
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1460
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1463
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1465 bnx2_reset_phy(bp);
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1468
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1473
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1478 else
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1481
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1486
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1488
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1492
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1494
1495 return 0;
1496}
1497
1498static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001499bnx2_init_5708s_phy(struct bnx2 *bp)
1500{
1501 u32 val;
1502
Michael Chan27a005b2007-05-03 13:23:41 -07001503 bnx2_reset_phy(bp);
1504
1505 bp->mii_up1 = BCM5708S_UP1;
1506
Michael Chan5b0c76a2005-11-04 08:45:49 -08001507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1510
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1514
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1518
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1523 }
1524
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1535 }
1536
Michael Chane3648b32005-11-04 08:51:21 -08001537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1539
1540 if (val) {
1541 u32 is_backplane;
1542
Michael Chane3648b32005-11-04 08:51:21 -08001543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1551 }
1552 }
1553 return 0;
1554}
1555
1556static int
1557bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001558{
Michael Chan27a005b2007-05-03 13:23:41 -07001559 bnx2_reset_phy(bp);
1560
Michael Chanb6016b72005-05-26 13:03:09 -07001561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1562
Michael Chan59b47d82006-11-19 14:10:45 -08001563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001565
1566 if (bp->dev->mtu > 1500) {
1567 u32 val;
1568
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1573
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1577 }
1578 else {
1579 u32 val;
1580
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1584
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1588 }
1589
1590 return 0;
1591}
1592
1593static int
1594bnx2_init_copper_phy(struct bnx2 *bp)
1595{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001596 u32 val;
1597
Michael Chan27a005b2007-05-03 13:23:41 -07001598 bnx2_reset_phy(bp);
1599
Michael Chanb6016b72005-05-26 13:03:09 -07001600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1609 }
1610
Michael Chanb659f442007-02-02 00:46:35 -08001611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1615 val &= ~(1 << 8);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1617 }
1618
Michael Chanb6016b72005-05-26 13:03:09 -07001619 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1624
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1627 }
1628 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1632
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1635 }
1636
Michael Chan5b0c76a2005-11-04 08:45:49 -08001637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001641 return 0;
1642}
1643
1644
1645static int
1646bnx2_init_phy(struct bnx2 *bp)
1647{
1648 u32 val;
1649 int rc = 0;
1650
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
Michael Chanca58c3a2007-05-03 13:22:52 -07001654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001656 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1659
Michael Chanb6016b72005-05-26 13:03:09 -07001660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
Michael Chanb6016b72005-05-26 13:03:09 -07001662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1666
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001674 }
1675 else {
1676 rc = bnx2_init_copper_phy(bp);
1677 }
1678
1679 bnx2_setup_phy(bp);
1680
1681 return rc;
1682}
1683
1684static int
1685bnx2_set_mac_loopback(struct bnx2 *bp)
1686{
1687 u32 mac_mode;
1688
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693 bp->link_up = 1;
1694 return 0;
1695}
1696
Michael Chanbc5a0692006-01-23 16:13:22 -08001697static int bnx2_test_link(struct bnx2 *);
1698
1699static int
1700bnx2_set_phy_loopback(struct bnx2 *bp)
1701{
1702 u32 mac_mode;
1703 int rc, i;
1704
1705 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001707 BMCR_SPEED1000);
1708 spin_unlock_bh(&bp->phy_lock);
1709 if (rc)
1710 return rc;
1711
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1714 break;
Michael Chan80be4432006-11-19 14:07:28 -08001715 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001716 }
1717
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001721 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001722
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725 bp->link_up = 1;
1726 return 0;
1727}
1728
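/* Driver/firmware handshake: post a sequenced message in the
 * shared-memory DRV_MB mailbox, then poll the FW_MB mailbox for a
 * matching acknowledgement; if none arrives within FW_ACK_TIME_OUT_MS,
 * the firmware is informed with a FW_TIMEOUT code.
 */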
Michael Chanb6016b72005-05-26 13:03:09 -07001729static int
Michael Chanb090ae22006-01-23 16:07:10 -08001730bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001731{
1732 int i;
1733 u32 val;
1734
Michael Chanb6016b72005-05-26 13:03:09 -07001735 bp->fw_wr_seq++;
1736 msg_data |= bp->fw_wr_seq;
1737
Michael Chane3648b32005-11-04 08:51:21 -08001738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001739
1740 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1742 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001743
Michael Chane3648b32005-11-04 08:51:21 -08001744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001745
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1747 break;
1748 }
Michael Chanb090ae22006-01-23 16:07:10 -08001749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1750 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001751
1752 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1754 if (!silent)
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1756 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001757
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1760
Michael Chane3648b32005-11-04 08:51:21 -08001761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001762
Michael Chanb6016b72005-05-26 13:03:09 -07001763 return -EBUSY;
1764 }
1765
Michael Chanb090ae22006-01-23 16:07:10 -08001766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1767 return -EIO;
1768
Michael Chanb6016b72005-05-26 13:03:09 -07001769 return 0;
1770}
1771
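/* 5709 context setup: enable the context memory with the host page
 * size, then program each context page address into the host page
 * table and poll until the WRITE_REQ bit clears for every entry.
 */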
Michael Chan59b47d82006-11-19 14:10:45 -08001772static int
1773bnx2_init_5709_context(struct bnx2 *bp)
1774{
1775 int i, ret = 0;
1776 u32 val;
1777
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < bp->ctx_pages; i++) {
1782 int j;
1783
1784 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788 (u64) bp->ctx_blk_mapping[i] >> 32);
1789 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791 for (j = 0; j < 10; j++) {
1792
1793 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1795 break;
1796 udelay(5);
1797 }
1798 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1799 ret = -EBUSY;
1800 break;
1801 }
1802 }
1803 return ret;
1804}
1805
Michael Chanb6016b72005-05-26 13:03:09 -07001806static void
1807bnx2_init_context(struct bnx2 *bp)
1808{
1809 u32 vcid;
1810
1811 vcid = 96;
1812 while (vcid) {
1813 u32 vcid_addr, pcid_addr, offset;
1814
1815 vcid--;
1816
1817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1818 u32 new_vcid;
1819
1820 vcid_addr = GET_PCID_ADDR(vcid);
1821 if (vcid & 0x8) {
1822 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1823 }
1824 else {
1825 new_vcid = vcid;
1826 }
1827 pcid_addr = GET_PCID_ADDR(new_vcid);
1828 }
1829 else {
1830 vcid_addr = GET_CID_ADDR(vcid);
1831 pcid_addr = vcid_addr;
1832 }
1833
1834 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1836
1837 /* Zero out the context. */
1838 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839 CTX_WR(bp, 0x00, offset, 0);
1840 }
1841
1842 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1844 }
1845}
1846
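/* Work around bad RX buffer memory: allocate every free mbuf from the
 * chip, record the good ones (bit 9 clear), and free only those back
 * so the bad blocks are never handed out again.
 */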
1847static int
1848bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849{
1850 u16 *good_mbuf;
1851 u32 good_mbuf_cnt;
1852 u32 val;
1853
1854 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855 if (good_mbuf == NULL) {
1856 printk(KERN_ERR PFX "Failed to allocate memory in "
1857 "bnx2_alloc_bad_rbuf\n");
1858 return -ENOMEM;
1859 }
1860
1861 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864 good_mbuf_cnt = 0;
1865
1866 /* Allocate a bunch of mbufs and save the good ones in an array. */
1867 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875 /* The addresses with Bit 9 set are bad memory blocks. */
1876 if (!(val & (1 << 9))) {
1877 good_mbuf[good_mbuf_cnt] = (u16) val;
1878 good_mbuf_cnt++;
1879 }
1880
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 }
1883
1884 /* Free the good ones back to the mbuf pool thus discarding
1885 * all the bad ones. */
1886 while (good_mbuf_cnt) {
1887 good_mbuf_cnt--;
1888
1889 val = good_mbuf[good_mbuf_cnt];
1890 val = (val << 9) | val | 1;
1891
1892 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893 }
1894 kfree(good_mbuf);
1895 return 0;
1896}
1897
1898static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001899bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001900{
1901 u32 val;
1902 u8 *mac_addr = bp->dev->dev_addr;
1903
1904 val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001909 (mac_addr[4] << 8) | mac_addr[5];
1910
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912}
1913
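/* Allocate and map one RX skb: reserve headroom to keep the data
 * BNX2_RX_ALIGN aligned, DMA-map it, and fill the rx_bd with the
 * 64-bit bus address split into high and low halves.
 */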
1914static inline int
1915bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916{
1917 struct sk_buff *skb;
1918 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001920 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001921 unsigned long align;
1922
Michael Chan932f3772006-08-15 01:39:36 -07001923 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001924 if (skb == NULL) {
1925 return -ENOMEM;
1926 }
1927
Michael Chan59b47d82006-11-19 14:10:45 -08001928 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001930
Michael Chanb6016b72005-05-26 13:03:09 -07001931 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932 PCI_DMA_FROMDEVICE);
1933
1934 rx_buf->skb = skb;
1935 pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940 bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942 return 0;
1943}
1944
Michael Chanda3e4fb2007-05-03 13:24:23 -07001945static int
1946bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1947{
1948 struct status_block *sblk = bp->status_blk;
1949 u32 new_link_state, old_link_state;
1950 int is_set = 1;
1951
1952 new_link_state = sblk->status_attn_bits & event;
1953 old_link_state = sblk->status_attn_bits_ack & event;
1954 if (new_link_state != old_link_state) {
1955 if (new_link_state)
1956 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1957 else
1958 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1959 } else
1960 is_set = 0;
1961
1962 return is_set;
1963}
1964
Michael Chanb6016b72005-05-26 13:03:09 -07001965static void
1966bnx2_phy_int(struct bnx2 *bp)
1967{
Michael Chanda3e4fb2007-05-03 13:24:23 -07001968 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1969 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001970 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07001971 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001972 }
1973}
1974
1975static void
1976bnx2_tx_int(struct bnx2 *bp)
1977{
Michael Chanf4e418f2005-11-04 08:53:48 -08001978 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001979 u16 hw_cons, sw_cons, sw_ring_cons;
1980 int tx_free_bd = 0;
1981
Michael Chanf4e418f2005-11-04 08:53:48 -08001982 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001983 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1984 hw_cons++;
1985 }
1986 sw_cons = bp->tx_cons;
1987
1988 while (sw_cons != hw_cons) {
1989 struct sw_bd *tx_buf;
1990 struct sk_buff *skb;
1991 int i, last;
1992
1993 sw_ring_cons = TX_RING_IDX(sw_cons);
1994
1995 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1996 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01001997
Michael Chanb6016b72005-05-26 13:03:09 -07001998 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07001999 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002000 u16 last_idx, last_ring_idx;
2001
2002 last_idx = sw_cons +
2003 skb_shinfo(skb)->nr_frags + 1;
2004 last_ring_idx = sw_ring_cons +
2005 skb_shinfo(skb)->nr_frags + 1;
2006 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2007 last_idx++;
2008 }
2009 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2010 break;
2011 }
2012 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002013
Michael Chanb6016b72005-05-26 13:03:09 -07002014 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2015 skb_headlen(skb), PCI_DMA_TODEVICE);
2016
2017 tx_buf->skb = NULL;
2018 last = skb_shinfo(skb)->nr_frags;
2019
2020 for (i = 0; i < last; i++) {
2021 sw_cons = NEXT_TX_BD(sw_cons);
2022
2023 pci_unmap_page(bp->pdev,
2024 pci_unmap_addr(
2025 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2026 mapping),
2027 skb_shinfo(skb)->frags[i].size,
2028 PCI_DMA_TODEVICE);
2029 }
2030
2031 sw_cons = NEXT_TX_BD(sw_cons);
2032
2033 tx_free_bd += last + 1;
2034
Michael Chan745720e2006-06-29 12:37:41 -07002035 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002036
Michael Chanf4e418f2005-11-04 08:53:48 -08002037 hw_cons = bp->hw_tx_cons =
2038 sblk->status_tx_quick_consumer_index0;
2039
Michael Chanb6016b72005-05-26 13:03:09 -07002040 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2041 hw_cons++;
2042 }
2043 }
2044
Michael Chane89bbf12005-08-25 15:36:58 -07002045 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002046 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2047 * before checking for netif_queue_stopped(). Without the
2048 * memory barrier, there is a small possibility that bnx2_start_xmit()
2049 * will miss it and cause the queue to be stopped forever.
2050 */
2051 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002052
Michael Chan2f8af122006-08-15 01:39:10 -07002053 if (unlikely(netif_queue_stopped(bp->dev)) &&
2054 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2055 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002056 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002057 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002058 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002059 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002060 }
Michael Chanb6016b72005-05-26 13:03:09 -07002061}
2062
2063static inline void
2064bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2065 u16 cons, u16 prod)
2066{
Michael Chan236b6392006-03-20 17:49:02 -08002067 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2068 struct rx_bd *cons_bd, *prod_bd;
2069
2070 cons_rx_buf = &bp->rx_buf_ring[cons];
2071 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002072
2073 pci_dma_sync_single_for_device(bp->pdev,
2074 pci_unmap_addr(cons_rx_buf, mapping),
2075 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2076
Michael Chan236b6392006-03-20 17:49:02 -08002077 bp->rx_prod_bseq += bp->rx_buf_use_size;
2078
2079 prod_rx_buf->skb = skb;
2080
2081 if (cons == prod)
2082 return;
2083
Michael Chanb6016b72005-05-26 13:03:09 -07002084 pci_unmap_addr_set(prod_rx_buf, mapping,
2085 pci_unmap_addr(cons_rx_buf, mapping));
2086
Michael Chan3fdfcc22006-03-20 17:49:49 -08002087 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2088 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002089 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2090 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002091}
2092
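/* RX completion handler: walk the ring up to the status block consumer
 * index, recycle frames with l2_fhdr errors, copy small packets when
 * the MTU is over 1500, otherwise pass the mapped skb up with the
 * checksum and VLAN offload results, refilling the ring as it goes.
 */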
2093static int
2094bnx2_rx_int(struct bnx2 *bp, int budget)
2095{
Michael Chanf4e418f2005-11-04 08:53:48 -08002096 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002097 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2098 struct l2_fhdr *rx_hdr;
2099 int rx_pkt = 0;
2100
Michael Chanf4e418f2005-11-04 08:53:48 -08002101 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002102 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2103 hw_cons++;
2104 }
2105 sw_cons = bp->rx_cons;
2106 sw_prod = bp->rx_prod;
2107
2108 /* Memory barrier necessary as speculative reads of the rx
2109 * buffer can be ahead of the index in the status block
2110 */
2111 rmb();
2112 while (sw_cons != hw_cons) {
2113 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002114 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002115 struct sw_bd *rx_buf;
2116 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002117 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002118
2119 sw_ring_cons = RX_RING_IDX(sw_cons);
2120 sw_ring_prod = RX_RING_IDX(sw_prod);
2121
2122 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2123 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002124
2125 rx_buf->skb = NULL;
2126
2127 dma_addr = pci_unmap_addr(rx_buf, mapping);
2128
2129 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002130 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2131
2132 rx_hdr = (struct l2_fhdr *) skb->data;
2133 len = rx_hdr->l2_fhdr_pkt_len - 4;
2134
Michael Chanade2bfe2006-01-23 16:09:51 -08002135 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002136 (L2_FHDR_ERRORS_BAD_CRC |
2137 L2_FHDR_ERRORS_PHY_DECODE |
2138 L2_FHDR_ERRORS_ALIGNMENT |
2139 L2_FHDR_ERRORS_TOO_SHORT |
2140 L2_FHDR_ERRORS_GIANT_FRAME)) {
2141
2142 goto reuse_rx;
2143 }
2144
2145 /* Since we don't have a jumbo ring, copy small packets
2146 * if mtu > 1500
2147 */
2148 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2149 struct sk_buff *new_skb;
2150
Michael Chan932f3772006-08-15 01:39:36 -07002151 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002152 if (new_skb == NULL)
2153 goto reuse_rx;
2154
2155 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002156 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2157 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002158 skb_reserve(new_skb, 2);
2159 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002160
2161 bnx2_reuse_rx_skb(bp, skb,
2162 sw_ring_cons, sw_ring_prod);
2163
2164 skb = new_skb;
2165 }
2166 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002167 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002168 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2169
2170 skb_reserve(skb, bp->rx_offset);
2171 skb_put(skb, len);
2172 }
2173 else {
2174reuse_rx:
2175 bnx2_reuse_rx_skb(bp, skb,
2176 sw_ring_cons, sw_ring_prod);
2177 goto next_rx;
2178 }
2179
2180 skb->protocol = eth_type_trans(skb, bp->dev);
2181
2182 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002183 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002184
Michael Chan745720e2006-06-29 12:37:41 -07002185 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002186 goto next_rx;
2187
2188 }
2189
Michael Chanb6016b72005-05-26 13:03:09 -07002190 skb->ip_summed = CHECKSUM_NONE;
2191 if (bp->rx_csum &&
2192 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2193 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2194
Michael Chanade2bfe2006-01-23 16:09:51 -08002195 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2196 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002197 skb->ip_summed = CHECKSUM_UNNECESSARY;
2198 }
2199
2200#ifdef BCM_VLAN
2201 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2202 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2203 rx_hdr->l2_fhdr_vlan_tag);
2204 }
2205 else
2206#endif
2207 netif_receive_skb(skb);
2208
2209 bp->dev->last_rx = jiffies;
2210 rx_pkt++;
2211
2212next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002213 sw_cons = NEXT_RX_BD(sw_cons);
2214 sw_prod = NEXT_RX_BD(sw_prod);
2215
2216 if ((rx_pkt == budget))
2217 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002218
2219 /* Refresh hw_cons to see if there is new work */
2220 if (sw_cons == hw_cons) {
2221 hw_cons = bp->hw_rx_cons =
2222 sblk->status_rx_quick_consumer_index0;
2223 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2224 hw_cons++;
2225 rmb();
2226 }
Michael Chanb6016b72005-05-26 13:03:09 -07002227 }
2228 bp->rx_cons = sw_cons;
2229 bp->rx_prod = sw_prod;
2230
2231 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2232
2233 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2234
2235 mmiowb();
2236
2237 return rx_pkt;
2238
2239}
2240
2241/* MSI ISR - The only difference between this and the INTx ISR
2242 * is that the MSI interrupt is always serviced.
2243 */
2244static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002245bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002246{
2247 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002248 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002249
Michael Chanc921e4c2005-09-08 13:15:32 -07002250 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002251 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2252 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2253 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2254
2255 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002256 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2257 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002258
Michael Chan73eef4c2005-08-25 15:39:15 -07002259 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002260
Michael Chan73eef4c2005-08-25 15:39:15 -07002261 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002262}
2263
2264static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002265bnx2_msi_1shot(int irq, void *dev_instance)
2266{
2267 struct net_device *dev = dev_instance;
2268 struct bnx2 *bp = netdev_priv(dev);
2269
2270 prefetch(bp->status_blk);
2271
2272 /* Return here if interrupt is disabled. */
2273 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2274 return IRQ_HANDLED;
2275
2276 netif_rx_schedule(dev);
2277
2278 return IRQ_HANDLED;
2279}
2280
2281static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002282bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002283{
2284 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002285 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002286
2287 /* When using INTx, it is possible for the interrupt to arrive
2288 * at the CPU before the status block posted prior to the
2289 * interrupt. Reading a register will flush the status block.
2290 * When using MSI, the MSI message will always complete after
2291 * the status block write.
2292 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002293 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002294 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2295 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002296 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002297
2298 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2299 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2300 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2301
2302 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002303 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2304 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002305
Michael Chan73eef4c2005-08-25 15:39:15 -07002306 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002307
Michael Chan73eef4c2005-08-25 15:39:15 -07002308 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002309}
2310
Michael Chanda3e4fb2007-05-03 13:24:23 -07002311#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2312
Michael Chanf4e418f2005-11-04 08:53:48 -08002313static inline int
2314bnx2_has_work(struct bnx2 *bp)
2315{
2316 struct status_block *sblk = bp->status_blk;
2317
2318 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2319 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2320 return 1;
2321
Michael Chanda3e4fb2007-05-03 13:24:23 -07002322 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2323 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002324 return 1;
2325
2326 return 0;
2327}
2328
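/* NAPI poll: handle link attention events, reap TX and RX completions
 * within the budget, and re-enable interrupts once bnx2_has_work()
 * reports nothing pending (a single ack write suffices in MSI mode;
 * INTx needs the additional masked write below).
 */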
Michael Chanb6016b72005-05-26 13:03:09 -07002329static int
2330bnx2_poll(struct net_device *dev, int *budget)
2331{
Michael Chan972ec0d2006-01-23 16:12:43 -08002332 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002333 struct status_block *sblk = bp->status_blk;
2334 u32 status_attn_bits = sblk->status_attn_bits;
2335 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002336
Michael Chanda3e4fb2007-05-03 13:24:23 -07002337 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2338 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002339
Michael Chanb6016b72005-05-26 13:03:09 -07002340 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002341
2342 /* This is needed to take care of transient status
2343 * during link changes.
2344 */
2345 REG_WR(bp, BNX2_HC_COMMAND,
2346 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2347 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002348 }
2349
Michael Chanf4e418f2005-11-04 08:53:48 -08002350 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002351 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002352
Michael Chanf4e418f2005-11-04 08:53:48 -08002353 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002354 int orig_budget = *budget;
2355 int work_done;
2356
2357 if (orig_budget > dev->quota)
2358 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002359
Michael Chanb6016b72005-05-26 13:03:09 -07002360 work_done = bnx2_rx_int(bp, orig_budget);
2361 *budget -= work_done;
2362 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002363 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002364
Michael Chanf4e418f2005-11-04 08:53:48 -08002365 bp->last_status_idx = bp->status_blk->status_idx;
2366 rmb();
2367
2368 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002369 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002370 if (likely(bp->flags & USING_MSI_FLAG)) {
2371 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2372 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2373 bp->last_status_idx);
2374 return 0;
2375 }
Michael Chanb6016b72005-05-26 13:03:09 -07002376 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002377 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2378 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2379 bp->last_status_idx);
2380
2381 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2382 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2383 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002384 return 0;
2385 }
2386
2387 return 1;
2388}
2389
Herbert Xu932ff272006-06-09 12:20:56 -07002390/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002391 * from set_multicast.
2392 */
2393static void
2394bnx2_set_rx_mode(struct net_device *dev)
2395{
Michael Chan972ec0d2006-01-23 16:12:43 -08002396 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002397 u32 rx_mode, sort_mode;
2398 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002399
Michael Chanc770a652005-08-25 15:38:39 -07002400 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002401
2402 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2403 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2404 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2405#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002406 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002407 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002408#else
Michael Chane29054f2006-01-23 16:06:06 -08002409 if (!(bp->flags & ASF_ENABLE_FLAG))
2410 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002411#endif
2412 if (dev->flags & IFF_PROMISC) {
2413 /* Promiscuous mode. */
2414 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002415 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2416 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002417 }
2418 else if (dev->flags & IFF_ALLMULTI) {
2419 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2420 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2421 0xffffffff);
2422 }
2423 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2424 }
2425 else {
2426 /* Accept one or more multicast(s). */
2427 struct dev_mc_list *mclist;
2428 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2429 u32 regidx;
2430 u32 bit;
2431 u32 crc;
2432
2433 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2434
2435 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2436 i++, mclist = mclist->next) {
2437
2438 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2439 bit = crc & 0xff;
2440 regidx = (bit & 0xe0) >> 5;
2441 bit &= 0x1f;
2442 mc_filter[regidx] |= (1 << bit);
2443 }
2444
2445 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2446 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2447 mc_filter[i]);
2448 }
2449
2450 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2451 }
2452
2453 if (rx_mode != bp->rx_mode) {
2454 bp->rx_mode = rx_mode;
2455 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2456 }
2457
2458 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2459 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2460 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2461
Michael Chanc770a652005-08-25 15:38:39 -07002462 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002463}
2464
Michael Chanfba9fe92006-06-12 22:21:25 -07002465#define FW_BUF_SIZE 0x8000
2466
2467static int
2468bnx2_gunzip_init(struct bnx2 *bp)
2469{
2470 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2471 goto gunzip_nomem1;
2472
2473 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2474 goto gunzip_nomem2;
2475
2476 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2477 if (bp->strm->workspace == NULL)
2478 goto gunzip_nomem3;
2479
2480 return 0;
2481
2482gunzip_nomem3:
2483 kfree(bp->strm);
2484 bp->strm = NULL;
2485
2486gunzip_nomem2:
2487 vfree(bp->gunzip_buf);
2488 bp->gunzip_buf = NULL;
2489
2490gunzip_nomem1:
2491 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
	2492	       "decompression.\n", bp->dev->name);
2493 return -ENOMEM;
2494}
2495
2496static void
2497bnx2_gunzip_end(struct bnx2 *bp)
2498{
2499 kfree(bp->strm->workspace);
2500
2501 kfree(bp->strm);
2502 bp->strm = NULL;
2503
2504 if (bp->gunzip_buf) {
2505 vfree(bp->gunzip_buf);
2506 bp->gunzip_buf = NULL;
2507 }
2508}
2509
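/* Decompress one gzip image: verify the gzip magic, skip the fixed
 * 10-byte header (plus the optional file name field), then inflate the
 * raw deflate stream into the preallocated gunzip buffer.
 */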
2510static int
2511bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2512{
2513 int n, rc;
2514
2515 /* check gzip header */
2516 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2517 return -EINVAL;
2518
2519 n = 10;
2520
2521#define FNAME 0x8
2522 if (zbuf[3] & FNAME)
2523 while ((zbuf[n++] != 0) && (n < len));
2524
2525 bp->strm->next_in = zbuf + n;
2526 bp->strm->avail_in = len - n;
2527 bp->strm->next_out = bp->gunzip_buf;
2528 bp->strm->avail_out = FW_BUF_SIZE;
2529
2530 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2531 if (rc != Z_OK)
2532 return rc;
2533
2534 rc = zlib_inflate(bp->strm, Z_FINISH);
2535
2536 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2537 *outbuf = bp->gunzip_buf;
2538
2539 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2540 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2541 bp->dev->name, bp->strm->msg);
2542
2543 zlib_inflateEnd(bp->strm);
2544
2545 if (rc == Z_STREAM_END)
2546 return 0;
2547
2548 return rc;
2549}
2550
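/* Load RV2P firmware: write each 64-bit instruction as an
 * INSTR_HIGH/INSTR_LOW pair, issue an ADDR_CMD write per pair to the
 * selected processor, then leave that processor in reset (it is
 * un-stalled later).
 */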
Michael Chanb6016b72005-05-26 13:03:09 -07002551static void
2552load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2553 u32 rv2p_proc)
2554{
2555 int i;
2556 u32 val;
2557
2558
2559 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002560 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002561 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002562 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002563 rv2p_code++;
2564
2565 if (rv2p_proc == RV2P_PROC1) {
2566 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2567 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2568 }
2569 else {
2570 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2571 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2572 }
2573 }
2574
2575 /* Reset the processor, un-stall is done later. */
2576 if (rv2p_proc == RV2P_PROC1) {
2577 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2578 }
2579 else {
2580 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2581 }
2582}
2583
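/* Load one on-chip CPU image: halt the CPU, copy the text section
 * (decompressing it first when stored gzipped) and the data, sbss,
 * bss and rodata sections into the scratchpad, clear the prefetch
 * instruction, set the entry point, and restart the CPU.
 */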
Michael Chanaf3ee512006-11-19 14:09:25 -08002584static int
Michael Chanb6016b72005-05-26 13:03:09 -07002585load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2586{
2587 u32 offset;
2588 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002589 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002590
2591 /* Halt the CPU. */
2592 val = REG_RD_IND(bp, cpu_reg->mode);
2593 val |= cpu_reg->mode_value_halt;
2594 REG_WR_IND(bp, cpu_reg->mode, val);
2595 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2596
2597 /* Load the Text area. */
2598 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002599 if (fw->gz_text) {
2600 u32 text_len;
2601 void *text;
2602
2603 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2604 &text_len);
2605 if (rc)
2606 return rc;
2607
2608 fw->text = text;
2609 }
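	/* Copy the (now decompressed) text image into the CPU scratchpad. */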
2610 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002611 int j;
2612
2613 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002614 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002615 }
2616 }
2617
2618 /* Load the Data area. */
2619 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2620 if (fw->data) {
2621 int j;
2622
2623 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2624 REG_WR_IND(bp, offset, fw->data[j]);
2625 }
2626 }
2627
2628 /* Load the SBSS area. */
2629 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2630 if (fw->sbss) {
2631 int j;
2632
2633 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2634 REG_WR_IND(bp, offset, fw->sbss[j]);
2635 }
2636 }
2637
2638 /* Load the BSS area. */
2639 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2640 if (fw->bss) {
2641 int j;
2642
2643 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2644 REG_WR_IND(bp, offset, fw->bss[j]);
2645 }
2646 }
2647
2648 /* Load the Read-Only area. */
2649 offset = cpu_reg->spad_base +
2650 (fw->rodata_addr - cpu_reg->mips_view_base);
2651 if (fw->rodata) {
2652 int j;
2653
2654 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2655 REG_WR_IND(bp, offset, fw->rodata[j]);
2656 }
2657 }
2658
2659 /* Clear the pre-fetch instruction. */
2660 REG_WR_IND(bp, cpu_reg->inst, 0);
2661 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2662
2663 /* Start the CPU. */
2664 val = REG_RD_IND(bp, cpu_reg->mode);
2665 val &= ~cpu_reg->mode_value_halt;
2666 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2667 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002668
2669 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002670}
2671
Michael Chanfba9fe92006-06-12 22:21:25 -07002672static int
Michael Chanb6016b72005-05-26 13:03:09 -07002673bnx2_init_cpus(struct bnx2 *bp)
2674{
2675 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002676 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002677 int rc = 0;
2678 void *text;
2679 u32 text_len;
2680
2681 if ((rc = bnx2_gunzip_init(bp)) != 0)
2682 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002683
2684 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002685 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2686 &text_len);
2687 if (rc)
2688 goto init_cpu_err;
2689
2690 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2691
2692 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2693 &text_len);
2694 if (rc)
2695 goto init_cpu_err;
2696
2697 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002698
2699 /* Initialize the RX Processor. */
2700 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2701 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2702 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2703 cpu_reg.state = BNX2_RXP_CPU_STATE;
2704 cpu_reg.state_value_clear = 0xffffff;
2705 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2706 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2707 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2708 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2709 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2710 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2711 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002712
Michael Chand43584c2006-11-19 14:14:35 -08002713 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2714 fw = &bnx2_rxp_fw_09;
2715 else
2716 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002717
Michael Chanaf3ee512006-11-19 14:09:25 -08002718 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002719 if (rc)
2720 goto init_cpu_err;
2721
Michael Chanb6016b72005-05-26 13:03:09 -07002722 /* Initialize the TX Processor. */
2723 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2724 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2725 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2726 cpu_reg.state = BNX2_TXP_CPU_STATE;
2727 cpu_reg.state_value_clear = 0xffffff;
2728 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2729 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2730 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2731 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2732 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2733 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2734 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002735
Michael Chand43584c2006-11-19 14:14:35 -08002736 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2737 fw = &bnx2_txp_fw_09;
2738 else
2739 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002740
Michael Chanaf3ee512006-11-19 14:09:25 -08002741 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002742 if (rc)
2743 goto init_cpu_err;
2744
Michael Chanb6016b72005-05-26 13:03:09 -07002745 /* Initialize the TX Patch-up Processor. */
2746 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2747 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2748 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2749 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2750 cpu_reg.state_value_clear = 0xffffff;
2751 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2752 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2753 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2754 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2755 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2756 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2757 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002758
Michael Chand43584c2006-11-19 14:14:35 -08002759 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2760 fw = &bnx2_tpat_fw_09;
2761 else
2762 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002763
Michael Chanaf3ee512006-11-19 14:09:25 -08002764 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002765 if (rc)
2766 goto init_cpu_err;
2767
Michael Chanb6016b72005-05-26 13:03:09 -07002768 /* Initialize the Completion Processor. */
2769 cpu_reg.mode = BNX2_COM_CPU_MODE;
2770 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2771 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2772 cpu_reg.state = BNX2_COM_CPU_STATE;
2773 cpu_reg.state_value_clear = 0xffffff;
2774 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2775 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2776 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2777 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2778 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2779 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2780 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002781
Michael Chand43584c2006-11-19 14:14:35 -08002782 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2783 fw = &bnx2_com_fw_09;
2784 else
2785 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002786
Michael Chanaf3ee512006-11-19 14:09:25 -08002787 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002788 if (rc)
2789 goto init_cpu_err;
2790
Michael Chand43584c2006-11-19 14:14:35 -08002791 /* Initialize the Command Processor. */
2792 cpu_reg.mode = BNX2_CP_CPU_MODE;
2793 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2794 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2795 cpu_reg.state = BNX2_CP_CPU_STATE;
2796 cpu_reg.state_value_clear = 0xffffff;
2797 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2798 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2799 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2800 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2801 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2802 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2803 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002804
Michael Chand43584c2006-11-19 14:14:35 -08002805 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2806 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002807
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002808 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002809 if (rc)
2810 goto init_cpu_err;
2811 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002812init_cpu_err:
2813 bnx2_gunzip_end(bp);
2814 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002815}
2816
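/* Power management: on D0, clear PME status and turn off the magic and
 * ACPI packet receive paths; on D3hot, optionally configure the PHY
 * and RX filters for Wake-on-LAN, notify the firmware, and write the
 * new state (with PME enabled when WOL is on) into PCI PM control.
 */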
2817static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002818bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002819{
2820 u16 pmcsr;
2821
2822 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2823
2824 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002825 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002826 u32 val;
2827
2828 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2829 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2830 PCI_PM_CTRL_PME_STATUS);
2831
2832 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2833 /* delay required during transition out of D3hot */
2834 msleep(20);
2835
2836 val = REG_RD(bp, BNX2_EMAC_MODE);
2837 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2838 val &= ~BNX2_EMAC_MODE_MPKT;
2839 REG_WR(bp, BNX2_EMAC_MODE, val);
2840
2841 val = REG_RD(bp, BNX2_RPM_CONFIG);
2842 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2843 REG_WR(bp, BNX2_RPM_CONFIG, val);
2844 break;
2845 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002846 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002847 int i;
2848 u32 val, wol_msg;
2849
2850 if (bp->wol) {
2851 u32 advertising;
2852 u8 autoneg;
2853
2854 autoneg = bp->autoneg;
2855 advertising = bp->advertising;
2856
2857 bp->autoneg = AUTONEG_SPEED;
2858 bp->advertising = ADVERTISED_10baseT_Half |
2859 ADVERTISED_10baseT_Full |
2860 ADVERTISED_100baseT_Half |
2861 ADVERTISED_100baseT_Full |
2862 ADVERTISED_Autoneg;
2863
2864 bnx2_setup_copper_phy(bp);
2865
2866 bp->autoneg = autoneg;
2867 bp->advertising = advertising;
2868
2869 bnx2_set_mac_addr(bp);
2870
2871 val = REG_RD(bp, BNX2_EMAC_MODE);
2872
2873 /* Enable port mode. */
2874 val &= ~BNX2_EMAC_MODE_PORT;
2875 val |= BNX2_EMAC_MODE_PORT_MII |
2876 BNX2_EMAC_MODE_MPKT_RCVD |
2877 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002878 BNX2_EMAC_MODE_MPKT;
2879
2880 REG_WR(bp, BNX2_EMAC_MODE, val);
2881
2882 /* receive all multicast */
2883 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2884 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2885 0xffffffff);
2886 }
2887 REG_WR(bp, BNX2_EMAC_RX_MODE,
2888 BNX2_EMAC_RX_MODE_SORT_MODE);
2889
2890 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2891 BNX2_RPM_SORT_USER0_MC_EN;
2892 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2893 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2894 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2895 BNX2_RPM_SORT_USER0_ENA);
2896
2897 /* Need to enable EMAC and RPM for WOL. */
2898 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2899 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2900 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2901 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2902
2903 val = REG_RD(bp, BNX2_RPM_CONFIG);
2904 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2905 REG_WR(bp, BNX2_RPM_CONFIG, val);
2906
2907 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2908 }
2909 else {
2910 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2911 }
2912
Michael Chandda1e392006-01-23 16:08:14 -08002913 if (!(bp->flags & NO_WOL_FLAG))
2914 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002915
2916 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2917 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2918 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2919
2920 if (bp->wol)
2921 pmcsr |= 3;
2922 }
2923 else {
2924 pmcsr |= 3;
2925 }
2926 if (bp->wol) {
2927 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2928 }
2929 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2930 pmcsr);
2931
2932 /* No more memory access after this point until
2933 * device is brought back to D0.
2934 */
2935 udelay(50);
2936 break;
2937 }
2938 default:
2939 return -EINVAL;
2940 }
2941 return 0;
2942}
2943
2944static int
2945bnx2_acquire_nvram_lock(struct bnx2 *bp)
2946{
2947 u32 val;
2948 int j;
2949
2950 /* Request access to the flash interface. */
2951 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2952 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2953 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2954 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2955 break;
2956
2957 udelay(5);
2958 }
2959
2960 if (j >= NVRAM_TIMEOUT_COUNT)
2961 return -EBUSY;
2962
2963 return 0;
2964}
2965
2966static int
2967bnx2_release_nvram_lock(struct bnx2 *bp)
2968{
2969 int j;
2970 u32 val;
2971
2972 /* Relinquish nvram interface. */
2973 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2974
2975 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2976 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2977 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2978 break;
2979
2980 udelay(5);
2981 }
2982
2983 if (j >= NVRAM_TIMEOUT_COUNT)
2984 return -EBUSY;
2985
2986 return 0;
2987}
2988
2989
2990static int
2991bnx2_enable_nvram_write(struct bnx2 *bp)
2992{
2993 u32 val;
2994
2995 val = REG_RD(bp, BNX2_MISC_CFG);
2996 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2997
2998 if (!bp->flash_info->buffered) {
2999 int j;
3000
3001 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3002 REG_WR(bp, BNX2_NVM_COMMAND,
3003 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3004
3005 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3006 udelay(5);
3007
3008 val = REG_RD(bp, BNX2_NVM_COMMAND);
3009 if (val & BNX2_NVM_COMMAND_DONE)
3010 break;
3011 }
3012
3013 if (j >= NVRAM_TIMEOUT_COUNT)
3014 return -EBUSY;
3015 }
3016 return 0;
3017}
3018
3019static void
3020bnx2_disable_nvram_write(struct bnx2 *bp)
3021{
3022 u32 val;
3023
3024 val = REG_RD(bp, BNX2_MISC_CFG);
3025 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3026}
3027
3028
3029static void
3030bnx2_enable_nvram_access(struct bnx2 *bp)
3031{
3032 u32 val;
3033
3034 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3035 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003036 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003037 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3038}
3039
3040static void
3041bnx2_disable_nvram_access(struct bnx2 *bp)
3042{
3043 u32 val;
3044
3045 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3046 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003047 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003048 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3049 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3050}
3051
3052static int
3053bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3054{
3055 u32 cmd;
3056 int j;
3057
3058 if (bp->flash_info->buffered)
3059 /* Buffered flash, no erase needed */
3060 return 0;
3061
3062 /* Build an erase command */
3063 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3064 BNX2_NVM_COMMAND_DOIT;
3065
3066 /* Need to clear DONE bit separately. */
3067 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3068
	3069	/* Address of the NVRAM page to erase. */
3070 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3071
3072 /* Issue an erase command. */
3073 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3074
3075 /* Wait for completion. */
3076 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3077 u32 val;
3078
3079 udelay(5);
3080
3081 val = REG_RD(bp, BNX2_NVM_COMMAND);
3082 if (val & BNX2_NVM_COMMAND_DONE)
3083 break;
3084 }
3085
3086 if (j >= NVRAM_TIMEOUT_COUNT)
3087 return -EBUSY;
3088
3089 return 0;
3090}
3091
3092static int
3093bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3094{
3095 u32 cmd;
3096 int j;
3097
3098 /* Build the command word. */
3099 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3100
3101 /* Calculate an offset of a buffered flash. */
3102 if (bp->flash_info->buffered) {
3103 offset = ((offset / bp->flash_info->page_size) <<
3104 bp->flash_info->page_bits) +
3105 (offset % bp->flash_info->page_size);
3106 }
3107
3108 /* Need to clear DONE bit separately. */
3109 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3110
3111 /* Address of the NVRAM to read from. */
3112 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3113
3114 /* Issue a read command. */
3115 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3116
3117 /* Wait for completion. */
3118 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3119 u32 val;
3120
3121 udelay(5);
3122
3123 val = REG_RD(bp, BNX2_NVM_COMMAND);
3124 if (val & BNX2_NVM_COMMAND_DONE) {
3125 val = REG_RD(bp, BNX2_NVM_READ);
3126
3127 val = be32_to_cpu(val);
3128 memcpy(ret_val, &val, 4);
3129 break;
3130 }
3131 }
3132 if (j >= NVRAM_TIMEOUT_COUNT)
3133 return -EBUSY;
3134
3135 return 0;
3136}
3137
3138
3139static int
3140bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3141{
3142 u32 cmd, val32;
3143 int j;
3144
3145 /* Build the command word. */
3146 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3147
3148 /* Calculate an offset of a buffered flash. */
3149 if (bp->flash_info->buffered) {
3150 offset = ((offset / bp->flash_info->page_size) <<
3151 bp->flash_info->page_bits) +
3152 (offset % bp->flash_info->page_size);
3153 }
3154
3155 /* Need to clear DONE bit separately. */
3156 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3157
3158 memcpy(&val32, val, 4);
3159 val32 = cpu_to_be32(val32);
3160
3161 /* Write the data. */
3162 REG_WR(bp, BNX2_NVM_WRITE, val32);
3163
3164 /* Address of the NVRAM to write to. */
3165 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3166
3167 /* Issue the write command. */
3168 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3169
3170 /* Wait for completion. */
3171 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3172 udelay(5);
3173
3174 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3175 break;
3176 }
3177 if (j >= NVRAM_TIMEOUT_COUNT)
3178 return -EBUSY;
3179
3180 return 0;
3181}
3182
3183static int
3184bnx2_init_nvram(struct bnx2 *bp)
3185{
3186 u32 val;
3187 int j, entry_count, rc;
3188 struct flash_spec *flash;
3189
3190 /* Determine the selected interface. */
3191 val = REG_RD(bp, BNX2_NVM_CFG1);
3192
3193 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3194
3195 rc = 0;
3196 if (val & 0x40000000) {
3197
3198 /* Flash interface has been reconfigured */
3199 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003200 j++, flash++) {
3201 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3202 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003203 bp->flash_info = flash;
3204 break;
3205 }
3206 }
3207 }
3208 else {
Michael Chan37137702005-11-04 08:49:17 -08003209 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003210 /* Not yet been reconfigured */
3211
Michael Chan37137702005-11-04 08:49:17 -08003212 if (val & (1 << 23))
3213 mask = FLASH_BACKUP_STRAP_MASK;
3214 else
3215 mask = FLASH_STRAP_MASK;
3216
Michael Chanb6016b72005-05-26 13:03:09 -07003217 for (j = 0, flash = &flash_table[0]; j < entry_count;
3218 j++, flash++) {
3219
Michael Chan37137702005-11-04 08:49:17 -08003220 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003221 bp->flash_info = flash;
3222
3223 /* Request access to the flash interface. */
3224 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3225 return rc;
3226
3227 /* Enable access to flash interface */
3228 bnx2_enable_nvram_access(bp);
3229
3230 /* Reconfigure the flash interface */
3231 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3232 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3233 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3234 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3235
3236 /* Disable access to flash interface */
3237 bnx2_disable_nvram_access(bp);
3238 bnx2_release_nvram_lock(bp);
3239
3240 break;
3241 }
3242 }
3243 } /* if (val & 0x40000000) */
3244
3245 if (j == entry_count) {
3246 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003247 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003248 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003249 }
3250
Michael Chan1122db72006-01-23 16:11:42 -08003251 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3252 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3253 if (val)
3254 bp->flash_size = val;
3255 else
3256 bp->flash_size = bp->flash_info->total_size;
3257
Michael Chanb6016b72005-05-26 13:03:09 -07003258 return rc;
3259}
3260
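/* NVRAM read helper: the flash is accessed one dword at a time, so
 * unaligned offsets and lengths are rounded out to whole dwords and
 * the FIRST/LAST command flags bracket the burst.
 */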
3261static int
3262bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3263 int buf_size)
3264{
3265 int rc = 0;
3266 u32 cmd_flags, offset32, len32, extra;
3267
3268 if (buf_size == 0)
3269 return 0;
3270
3271 /* Request access to the flash interface. */
3272 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3273 return rc;
3274
3275 /* Enable access to flash interface */
3276 bnx2_enable_nvram_access(bp);
3277
3278 len32 = buf_size;
3279 offset32 = offset;
3280 extra = 0;
3281
3282 cmd_flags = 0;
3283
3284 if (offset32 & 3) {
3285 u8 buf[4];
3286 u32 pre_len;
3287
3288 offset32 &= ~3;
3289 pre_len = 4 - (offset & 3);
3290
3291 if (pre_len >= len32) {
3292 pre_len = len32;
3293 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3294 BNX2_NVM_COMMAND_LAST;
3295 }
3296 else {
3297 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3298 }
3299
3300 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3301
3302 if (rc)
3303 return rc;
3304
3305 memcpy(ret_buf, buf + (offset & 3), pre_len);
3306
3307 offset32 += 4;
3308 ret_buf += pre_len;
3309 len32 -= pre_len;
3310 }
3311 if (len32 & 3) {
3312 extra = 4 - (len32 & 3);
3313 len32 = (len32 + 4) & ~3;
3314 }
3315
3316 if (len32 == 4) {
3317 u8 buf[4];
3318
3319 if (cmd_flags)
3320 cmd_flags = BNX2_NVM_COMMAND_LAST;
3321 else
3322 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3323 BNX2_NVM_COMMAND_LAST;
3324
3325 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3326
3327 memcpy(ret_buf, buf, 4 - extra);
3328 }
3329 else if (len32 > 0) {
3330 u8 buf[4];
3331
3332 /* Read the first word. */
3333 if (cmd_flags)
3334 cmd_flags = 0;
3335 else
3336 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3337
3338 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3339
3340 /* Advance to the next dword. */
3341 offset32 += 4;
3342 ret_buf += 4;
3343 len32 -= 4;
3344
3345 while (len32 > 4 && rc == 0) {
3346 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3347
3348 /* Advance to the next dword. */
3349 offset32 += 4;
3350 ret_buf += 4;
3351 len32 -= 4;
3352 }
3353
3354 if (rc)
3355 return rc;
3356
3357 cmd_flags = BNX2_NVM_COMMAND_LAST;
3358 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3359
3360 memcpy(ret_buf, buf, 4 - extra);
3361 }
3362
3363 /* Disable access to flash interface */
3364 bnx2_disable_nvram_access(bp);
3365
3366 bnx2_release_nvram_lock(bp);
3367
3368 return rc;
3369}
3370
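/* NVRAM write helper: round the region out to dword boundaries with
 * read-modify-write at the edges; non-buffered flash is handled a page
 * at a time, reading the existing page into a bounce buffer first.
 */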
3371static int
3372bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3373 int buf_size)
3374{
3375 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003376 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003377 int rc = 0;
3378 int align_start, align_end;
3379
3380 buf = data_buf;
3381 offset32 = offset;
3382 len32 = buf_size;
3383 align_start = align_end = 0;
3384
3385 if ((align_start = (offset32 & 3))) {
3386 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003387 len32 += align_start;
3388 if (len32 < 4)
3389 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003390 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3391 return rc;
3392 }
3393
3394 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003395 align_end = 4 - (len32 & 3);
3396 len32 += align_end;
3397 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3398 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003399 }
3400
3401 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003402 align_buf = kmalloc(len32, GFP_KERNEL);
3403 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003404 return -ENOMEM;
3405 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003406 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003407 }
3408 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003409 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003410 }
Michael Chane6be7632007-01-08 19:56:13 -08003411 memcpy(align_buf + align_start, data_buf, buf_size);
3412 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003413 }
3414
Michael Chanae181bc2006-05-22 16:39:20 -07003415 if (bp->flash_info->buffered == 0) {
3416 flash_buffer = kmalloc(264, GFP_KERNEL);
3417 if (flash_buffer == NULL) {
3418 rc = -ENOMEM;
3419 goto nvram_write_end;
3420 }
3421 }
3422
Michael Chanb6016b72005-05-26 13:03:09 -07003423 written = 0;
3424 while ((written < len32) && (rc == 0)) {
3425 u32 page_start, page_end, data_start, data_end;
3426 u32 addr, cmd_flags;
3427 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003428
3429 /* Find the page_start addr */
3430 page_start = offset32 + written;
3431 page_start -= (page_start % bp->flash_info->page_size);
3432 /* Find the page_end addr */
3433 page_end = page_start + bp->flash_info->page_size;
3434 /* Find the data_start addr */
3435 data_start = (written == 0) ? offset32 : page_start;
3436 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003437 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003438 (offset32 + len32) : page_end;
3439
3440 /* Request access to the flash interface. */
3441 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3442 goto nvram_write_end;
3443
3444 /* Enable access to flash interface */
3445 bnx2_enable_nvram_access(bp);
3446
3447 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3448 if (bp->flash_info->buffered == 0) {
3449 int j;
3450
3451 /* Read the whole page into the buffer
3452		 	 * (non-buffered flash only) */
3453 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3454 if (j == (bp->flash_info->page_size - 4)) {
3455 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3456 }
3457 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003458 page_start + j,
3459 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003460 cmd_flags);
3461
3462 if (rc)
3463 goto nvram_write_end;
3464
3465 cmd_flags = 0;
3466 }
3467 }
3468
3469 /* Enable writes to flash interface (unlock write-protect) */
3470 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3471 goto nvram_write_end;
3472
Michael Chanb6016b72005-05-26 13:03:09 -07003473 /* Loop to write back the buffer data from page_start to
3474 * data_start */
3475 i = 0;
3476 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003477 /* Erase the page */
3478 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3479 goto nvram_write_end;
3480
3481			/* Re-enable writes for the actual data write */
3482 bnx2_enable_nvram_write(bp);
3483
Michael Chanb6016b72005-05-26 13:03:09 -07003484 for (addr = page_start; addr < data_start;
3485 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003486
Michael Chanb6016b72005-05-26 13:03:09 -07003487 rc = bnx2_nvram_write_dword(bp, addr,
3488 &flash_buffer[i], cmd_flags);
3489
3490 if (rc != 0)
3491 goto nvram_write_end;
3492
3493 cmd_flags = 0;
3494 }
3495 }
3496
3497 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003498 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003499 if ((addr == page_end - 4) ||
3500 ((bp->flash_info->buffered) &&
3501 (addr == data_end - 4))) {
3502
3503 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3504 }
3505 rc = bnx2_nvram_write_dword(bp, addr, buf,
3506 cmd_flags);
3507
3508 if (rc != 0)
3509 goto nvram_write_end;
3510
3511 cmd_flags = 0;
3512 buf += 4;
3513 }
3514
3515 /* Loop to write back the buffer data from data_end
3516 * to page_end */
3517 if (bp->flash_info->buffered == 0) {
3518 for (addr = data_end; addr < page_end;
3519 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003520
Michael Chanb6016b72005-05-26 13:03:09 -07003521 if (addr == page_end-4) {
3522 cmd_flags = BNX2_NVM_COMMAND_LAST;
3523 }
3524 rc = bnx2_nvram_write_dword(bp, addr,
3525 &flash_buffer[i], cmd_flags);
3526
3527 if (rc != 0)
3528 goto nvram_write_end;
3529
3530 cmd_flags = 0;
3531 }
3532 }
3533
3534 /* Disable writes to flash interface (lock write-protect) */
3535 bnx2_disable_nvram_write(bp);
3536
3537 /* Disable access to flash interface */
3538 bnx2_disable_nvram_access(bp);
3539 bnx2_release_nvram_lock(bp);
3540
3541 /* Increment written */
3542 written += data_end - data_start;
3543 }
3544
3545nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003546 kfree(flash_buffer);
3547 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003548 return rc;
3549}
3550
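/* Perform a chip reset.  The DMA engines are quiesced and the firmware
 * is told to expect the reset (DRV_MSG_DATA_WAIT0) before a driver
 * reset signature is written to shared memory.  The 5709 is reset via
 * BNX2_MISC_COMMAND_SW_RESET; older chips use the core-reset bits in
 * BNX2_PCICFG_MISC_CONFIG and are polled until the reset completes.
 * Byte swapping is then verified through BNX2_PCI_SWAP_DIAG0 and the
 * function waits for firmware initialization (DRV_MSG_DATA_WAIT1).
 */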
3551static int
3552bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3553{
3554 u32 val;
3555 int i, rc = 0;
3556
3557 /* Wait for the current PCI transaction to complete before
3558 * issuing a reset. */
3559 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3560 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3561 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3562 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3563 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3564 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3565 udelay(5);
3566
Michael Chanb090ae22006-01-23 16:07:10 -08003567 /* Wait for the firmware to tell us it is ok to issue a reset. */
3568 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3569
Michael Chanb6016b72005-05-26 13:03:09 -07003570 /* Deposit a driver reset signature so the firmware knows that
3571 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003572 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003573 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3574
Michael Chanb6016b72005-05-26 13:03:09 -07003575	/* Do a dummy read to force the chip to complete all current transactions
3576 * before we issue a reset. */
3577 val = REG_RD(bp, BNX2_MISC_ID);
3578
Michael Chan234754d2006-11-19 14:11:41 -08003579 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3580 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3581 REG_RD(bp, BNX2_MISC_COMMAND);
3582 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003583
Michael Chan234754d2006-11-19 14:11:41 -08003584 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3585 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003586
Michael Chan234754d2006-11-19 14:11:41 -08003587 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003588
Michael Chan234754d2006-11-19 14:11:41 -08003589 } else {
3590 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3591 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3592 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3593
3594 /* Chip reset. */
3595 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3596
3597 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3598 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3599 current->state = TASK_UNINTERRUPTIBLE;
3600 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003601 }
Michael Chanb6016b72005-05-26 13:03:09 -07003602
Michael Chan234754d2006-11-19 14:11:41 -08003603		/* Reset takes approximately 30 usec */
3604 for (i = 0; i < 10; i++) {
3605 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3606 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3607 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3608 break;
3609 udelay(10);
3610 }
3611
3612 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3613 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3614 printk(KERN_ERR PFX "Chip reset did not complete\n");
3615 return -EBUSY;
3616 }
Michael Chanb6016b72005-05-26 13:03:09 -07003617 }
3618
3619 /* Make sure byte swapping is properly configured. */
3620 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3621 if (val != 0x01020304) {
3622 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3623 return -ENODEV;
3624 }
3625
Michael Chanb6016b72005-05-26 13:03:09 -07003626 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003627 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3628 if (rc)
3629 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003630
3631 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3632		/* Adjust the voltage regulator two steps lower.  The default
3633 * of this register is 0x0000000e. */
3634 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3635
3636 /* Remove bad rbuf memory from the free pool. */
3637 rc = bnx2_alloc_bad_rbuf(bp);
3638 }
3639
3640 return rc;
3641}
3642
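/* Bring the chip to an operational state after reset: program DMA
 * byte/word swapping, initialize the context memory, load the on-chip
 * CPU firmware, set up the MAC address, backoff seed, MTU, host
 * coalescing parameters and status/statistics block addresses, then
 * enable the internal blocks and complete the firmware handshake
 * (DRV_MSG_DATA_WAIT2).
 */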
3643static int
3644bnx2_init_chip(struct bnx2 *bp)
3645{
3646 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003647 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003648
3649 /* Make sure the interrupt is not active. */
3650 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3651
3652 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3653 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3654#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003655 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003656#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003657 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003658 DMA_READ_CHANS << 12 |
3659 DMA_WRITE_CHANS << 16;
3660
3661 val |= (0x2 << 20) | (1 << 11);
3662
Michael Chandda1e392006-01-23 16:08:14 -08003663 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003664 val |= (1 << 23);
3665
3666 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3667 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3668 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3669
3670 REG_WR(bp, BNX2_DMA_CONFIG, val);
3671
3672 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3673 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3674 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3675 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3676 }
3677
3678 if (bp->flags & PCIX_FLAG) {
3679 u16 val16;
3680
3681 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3682 &val16);
3683 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3684 val16 & ~PCI_X_CMD_ERO);
3685 }
3686
3687 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3688 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3689 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3690 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3691
3692 /* Initialize context mapping and zero out the quick contexts. The
3693 * context block must have already been enabled. */
Michael Chan59b47d82006-11-19 14:10:45 -08003694 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3695 bnx2_init_5709_context(bp);
3696 else
3697 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003698
Michael Chanfba9fe92006-06-12 22:21:25 -07003699 if ((rc = bnx2_init_cpus(bp)) != 0)
3700 return rc;
3701
Michael Chanb6016b72005-05-26 13:03:09 -07003702 bnx2_init_nvram(bp);
3703
3704 bnx2_set_mac_addr(bp);
3705
3706 val = REG_RD(bp, BNX2_MQ_CONFIG);
3707 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3708 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003709 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3710 val |= BNX2_MQ_CONFIG_HALT_DIS;
3711
Michael Chanb6016b72005-05-26 13:03:09 -07003712 REG_WR(bp, BNX2_MQ_CONFIG, val);
3713
3714 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3715 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3716 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3717
3718 val = (BCM_PAGE_BITS - 8) << 24;
3719 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3720
3721 /* Configure page size. */
3722 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3723 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3724 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3725 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3726
3727 val = bp->mac_addr[0] +
3728 (bp->mac_addr[1] << 8) +
3729 (bp->mac_addr[2] << 16) +
3730 bp->mac_addr[3] +
3731 (bp->mac_addr[4] << 8) +
3732 (bp->mac_addr[5] << 16);
3733 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3734
3735 /* Program the MTU. Also include 4 bytes for CRC32. */
3736 val = bp->dev->mtu + ETH_HLEN + 4;
3737 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3738 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3739 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3740
3741 bp->last_status_idx = 0;
3742 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3743
3744 /* Set up how to generate a link change interrupt. */
3745 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3746
3747 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3748 (u64) bp->status_blk_mapping & 0xffffffff);
3749 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3750
3751 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3752 (u64) bp->stats_blk_mapping & 0xffffffff);
3753 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3754 (u64) bp->stats_blk_mapping >> 32);
3755
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003756 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003757 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3758
3759 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3760 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3761
3762 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3763 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3764
3765 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3766
3767 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3768
3769 REG_WR(bp, BNX2_HC_COM_TICKS,
3770 (bp->com_ticks_int << 16) | bp->com_ticks);
3771
3772 REG_WR(bp, BNX2_HC_CMD_TICKS,
3773 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3774
3775 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3776 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3777
3778 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07003779 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003780 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07003781 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3782 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003783 }
3784
Michael Chan8e6a72c2007-05-03 13:24:48 -07003785 if (bp->flags & ONE_SHOT_MSI_FLAG)
3786 val |= BNX2_HC_CONFIG_ONE_SHOT;
3787
3788 REG_WR(bp, BNX2_HC_CONFIG, val);
3789
Michael Chanb6016b72005-05-26 13:03:09 -07003790 /* Clear internal stats counters. */
3791 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3792
Michael Chanda3e4fb2007-05-03 13:24:23 -07003793 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07003794
Michael Chane29054f2006-01-23 16:06:06 -08003795 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3796 BNX2_PORT_FEATURE_ASF_ENABLED)
3797 bp->flags |= ASF_ENABLE_FLAG;
3798
Michael Chanb6016b72005-05-26 13:03:09 -07003799 /* Initialize the receive filter. */
3800 bnx2_set_rx_mode(bp->dev);
3801
Michael Chanb090ae22006-01-23 16:07:10 -08003802 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3803 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003804
3805 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3806 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3807
3808 udelay(20);
3809
Michael Chanbf5295b2006-03-23 01:11:56 -08003810 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3811
Michael Chanb090ae22006-01-23 16:07:10 -08003812 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003813}
3814
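/* Program the L2 TX context for the given CID.  The 5709 uses a
 * different set of context offsets (the _XI variants) than earlier
 * chips.
 */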
Michael Chan59b47d82006-11-19 14:10:45 -08003815static void
3816bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3817{
3818 u32 val, offset0, offset1, offset2, offset3;
3819
3820 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3821 offset0 = BNX2_L2CTX_TYPE_XI;
3822 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3823 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3824 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3825 } else {
3826 offset0 = BNX2_L2CTX_TYPE;
3827 offset1 = BNX2_L2CTX_CMD_TYPE;
3828 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3829 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3830 }
3831 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3832 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3833
3834 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3835 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3836
3837 val = (u64) bp->tx_desc_mapping >> 32;
3838 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3839
3840 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3841 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3842}
Michael Chanb6016b72005-05-26 13:03:09 -07003843
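/* Initialize the TX ring: the last BD is used as a chain pointer back
 * to the start of the ring, the producer/consumer indices are reset,
 * tx_wake_thresh is set to half the ring size, and the TX context is
 * programmed for TX_CID.
 */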
3844static void
3845bnx2_init_tx_ring(struct bnx2 *bp)
3846{
3847 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003848 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003849
Michael Chan2f8af122006-08-15 01:39:10 -07003850 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3851
Michael Chanb6016b72005-05-26 13:03:09 -07003852 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003853
Michael Chanb6016b72005-05-26 13:03:09 -07003854 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3855 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3856
3857 bp->tx_prod = 0;
3858 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003859 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003860 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003861
Michael Chan59b47d82006-11-19 14:10:45 -08003862 cid = TX_CID;
3863 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3864 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003865
Michael Chan59b47d82006-11-19 14:10:45 -08003866 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003867}
3868
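/* Initialize the RX ring.  rx_buf_use_size covers the MTU plus the
 * Ethernet header, the rx_offset for the l2_fhdr and 8 bytes for CRC
 * and VLAN tag; the allocated buffer adds room for hardware alignment.
 * The RX BD pages are chained together (the last page points back to
 * the first), rx_ring_size SKBs are allocated and posted, and the
 * producer index and byte sequence are written to the chip.
 */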
3869static void
3870bnx2_init_rx_ring(struct bnx2 *bp)
3871{
3872 struct rx_bd *rxbd;
3873 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003874 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003875 u32 val;
3876
3877 /* 8 for CRC and VLAN */
3878 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003879 /* hw alignment */
3880 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003881
3882 ring_prod = prod = bp->rx_prod = 0;
3883 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003884 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003885 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003886
Michael Chan13daffa2006-03-20 17:49:20 -08003887 for (i = 0; i < bp->rx_max_ring; i++) {
3888 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003889
Michael Chan13daffa2006-03-20 17:49:20 -08003890 rxbd = &bp->rx_desc_ring[i][0];
3891 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3892 rxbd->rx_bd_len = bp->rx_buf_use_size;
3893 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3894 }
3895 if (i == (bp->rx_max_ring - 1))
3896 j = 0;
3897 else
3898 j = i + 1;
3899 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3900 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3901 0xffffffff;
3902 }
Michael Chanb6016b72005-05-26 13:03:09 -07003903
3904 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3905 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3906 val |= 0x02 << 8;
3907 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3908
Michael Chan13daffa2006-03-20 17:49:20 -08003909 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003910 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3911
Michael Chan13daffa2006-03-20 17:49:20 -08003912 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003913 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3914
Michael Chan236b6392006-03-20 17:49:02 -08003915 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003916 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3917 break;
3918 }
3919 prod = NEXT_RX_BD(prod);
3920 ring_prod = RX_RING_IDX(prod);
3921 }
3922 bp->rx_prod = prod;
3923
3924 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3925
3926 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3927}
3928
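/* Compute how many RX BD pages are needed for the requested ring size
 * and round that count up to the next power of two, capped at
 * MAX_RX_RINGS.  For example, a size slightly larger than one page's
 * worth of descriptors ends up using two BD pages.
 */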
3929static void
Michael Chan13daffa2006-03-20 17:49:20 -08003930bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3931{
3932 u32 num_rings, max;
3933
3934 bp->rx_ring_size = size;
3935 num_rings = 1;
3936 while (size > MAX_RX_DESC_CNT) {
3937 size -= MAX_RX_DESC_CNT;
3938 num_rings++;
3939 }
3940 /* round to next power of 2 */
3941 max = MAX_RX_RINGS;
3942 while ((max & num_rings) == 0)
3943 max >>= 1;
3944
3945 if (num_rings != max)
3946 max <<= 1;
3947
3948 bp->rx_max_ring = max;
3949 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3950}
3951
3952static void
Michael Chanb6016b72005-05-26 13:03:09 -07003953bnx2_free_tx_skbs(struct bnx2 *bp)
3954{
3955 int i;
3956
3957 if (bp->tx_buf_ring == NULL)
3958 return;
3959
3960 for (i = 0; i < TX_DESC_CNT; ) {
3961 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3962 struct sk_buff *skb = tx_buf->skb;
3963 int j, last;
3964
3965 if (skb == NULL) {
3966 i++;
3967 continue;
3968 }
3969
3970 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3971 skb_headlen(skb), PCI_DMA_TODEVICE);
3972
3973 tx_buf->skb = NULL;
3974
3975 last = skb_shinfo(skb)->nr_frags;
3976 for (j = 0; j < last; j++) {
3977 tx_buf = &bp->tx_buf_ring[i + j + 1];
3978 pci_unmap_page(bp->pdev,
3979 pci_unmap_addr(tx_buf, mapping),
3980 skb_shinfo(skb)->frags[j].size,
3981 PCI_DMA_TODEVICE);
3982 }
Michael Chan745720e2006-06-29 12:37:41 -07003983 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003984 i += j + 1;
3985 }
3986
3987}
3988
3989static void
3990bnx2_free_rx_skbs(struct bnx2 *bp)
3991{
3992 int i;
3993
3994 if (bp->rx_buf_ring == NULL)
3995 return;
3996
Michael Chan13daffa2006-03-20 17:49:20 -08003997 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003998 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3999 struct sk_buff *skb = rx_buf->skb;
4000
Michael Chan05d0f1c2005-11-04 08:53:48 -08004001 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004002 continue;
4003
4004 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4005 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4006
4007 rx_buf->skb = NULL;
4008
Michael Chan745720e2006-06-29 12:37:41 -07004009 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004010 }
4011}
4012
4013static void
4014bnx2_free_skbs(struct bnx2 *bp)
4015{
4016 bnx2_free_tx_skbs(bp);
4017 bnx2_free_rx_skbs(bp);
4018}
4019
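/* Reset and reinitialize the whole NIC: reset the chip with the given
 * firmware reset code, drop all queued TX/RX buffers, then rebuild the
 * chip state and the TX/RX rings.
 */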
4020static int
4021bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4022{
4023 int rc;
4024
4025 rc = bnx2_reset_chip(bp, reset_code);
4026 bnx2_free_skbs(bp);
4027 if (rc)
4028 return rc;
4029
Michael Chanfba9fe92006-06-12 22:21:25 -07004030 if ((rc = bnx2_init_chip(bp)) != 0)
4031 return rc;
4032
Michael Chanb6016b72005-05-26 13:03:09 -07004033 bnx2_init_tx_ring(bp);
4034 bnx2_init_rx_ring(bp);
4035 return 0;
4036}
4037
4038static int
4039bnx2_init_nic(struct bnx2 *bp)
4040{
4041 int rc;
4042
4043 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4044 return rc;
4045
Michael Chan80be4432006-11-19 14:07:28 -08004046 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004047 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004048 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004049 bnx2_set_link(bp);
4050 return 0;
4051}
4052
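/* Register self-test.  For each table entry, rw_mask marks bits that
 * must be writable and ro_mask marks bits that must keep their value;
 * the test writes 0 and 0xffffffff, checks both masks, and restores
 * the original register contents.  Entries flagged BNX2_FL_NOT_5709
 * are skipped on 5709 chips.
 */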
4053static int
4054bnx2_test_registers(struct bnx2 *bp)
4055{
4056 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004057 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004058 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004059 u16 offset;
4060 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004061#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004062 u32 rw_mask;
4063 u32 ro_mask;
4064 } reg_tbl[] = {
4065 { 0x006c, 0, 0x00000000, 0x0000003f },
4066 { 0x0090, 0, 0xffffffff, 0x00000000 },
4067 { 0x0094, 0, 0x00000000, 0x00000000 },
4068
Michael Chan5bae30c2007-05-03 13:18:46 -07004069 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4070 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4071 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4072 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4073 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4074 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4075 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4076 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4077 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004078
Michael Chan5bae30c2007-05-03 13:18:46 -07004079 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4080 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4081 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4082 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4083 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4084 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004085
Michael Chan5bae30c2007-05-03 13:18:46 -07004086 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4087 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4088 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004089
4090 { 0x1000, 0, 0x00000000, 0x00000001 },
4091 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004092
4093 { 0x1408, 0, 0x01c00800, 0x00000000 },
4094 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4095 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004096 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004097 { 0x14b0, 0, 0x00000002, 0x00000001 },
4098 { 0x14b8, 0, 0x00000000, 0x00000000 },
4099 { 0x14c0, 0, 0x00000000, 0x00000009 },
4100 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4101 { 0x14cc, 0, 0x00000000, 0x00000001 },
4102 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004103
4104 { 0x1800, 0, 0x00000000, 0x00000001 },
4105 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004106
4107 { 0x2800, 0, 0x00000000, 0x00000001 },
4108 { 0x2804, 0, 0x00000000, 0x00003f01 },
4109 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4110 { 0x2810, 0, 0xffff0000, 0x00000000 },
4111 { 0x2814, 0, 0xffff0000, 0x00000000 },
4112 { 0x2818, 0, 0xffff0000, 0x00000000 },
4113 { 0x281c, 0, 0xffff0000, 0x00000000 },
4114 { 0x2834, 0, 0xffffffff, 0x00000000 },
4115 { 0x2840, 0, 0x00000000, 0xffffffff },
4116 { 0x2844, 0, 0x00000000, 0xffffffff },
4117 { 0x2848, 0, 0xffffffff, 0x00000000 },
4118 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4119
4120 { 0x2c00, 0, 0x00000000, 0x00000011 },
4121 { 0x2c04, 0, 0x00000000, 0x00030007 },
4122
Michael Chanb6016b72005-05-26 13:03:09 -07004123 { 0x3c00, 0, 0x00000000, 0x00000001 },
4124 { 0x3c04, 0, 0x00000000, 0x00070000 },
4125 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4126 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4127 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4128 { 0x3c14, 0, 0x00000000, 0xffffffff },
4129 { 0x3c18, 0, 0x00000000, 0xffffffff },
4130 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4131 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004132
4133 { 0x5004, 0, 0x00000000, 0x0000007f },
4134 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004135
Michael Chanb6016b72005-05-26 13:03:09 -07004136 { 0x5c00, 0, 0x00000000, 0x00000001 },
4137 { 0x5c04, 0, 0x00000000, 0x0003000f },
4138 { 0x5c08, 0, 0x00000003, 0x00000000 },
4139 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4140 { 0x5c10, 0, 0x00000000, 0xffffffff },
4141 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4142 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4143 { 0x5c88, 0, 0x00000000, 0x00077373 },
4144 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4145
4146 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4147 { 0x680c, 0, 0xffffffff, 0x00000000 },
4148 { 0x6810, 0, 0xffffffff, 0x00000000 },
4149 { 0x6814, 0, 0xffffffff, 0x00000000 },
4150 { 0x6818, 0, 0xffffffff, 0x00000000 },
4151 { 0x681c, 0, 0xffffffff, 0x00000000 },
4152 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4153 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4154 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4155 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4156 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4157 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4158 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4159 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4160 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4161 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4162 { 0x684c, 0, 0xffffffff, 0x00000000 },
4163 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4164 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4165 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4166 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4167 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4168 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4169
4170 { 0xffff, 0, 0x00000000, 0x00000000 },
4171 };
4172
4173 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004174 is_5709 = 0;
4175 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4176 is_5709 = 1;
4177
Michael Chanb6016b72005-05-26 13:03:09 -07004178 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4179 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004180 u16 flags = reg_tbl[i].flags;
4181
4182 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4183 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004184
4185 offset = (u32) reg_tbl[i].offset;
4186 rw_mask = reg_tbl[i].rw_mask;
4187 ro_mask = reg_tbl[i].ro_mask;
4188
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004189 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004190
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004191 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004192
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004193 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004194 if ((val & rw_mask) != 0) {
4195 goto reg_test_err;
4196 }
4197
4198 if ((val & ro_mask) != (save_val & ro_mask)) {
4199 goto reg_test_err;
4200 }
4201
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004202 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004203
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004204 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004205 if ((val & rw_mask) != rw_mask) {
4206 goto reg_test_err;
4207 }
4208
4209 if ((val & ro_mask) != (save_val & ro_mask)) {
4210 goto reg_test_err;
4211 }
4212
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004213 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004214 continue;
4215
4216reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004217 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004218 ret = -ENODEV;
4219 break;
4220 }
4221 return ret;
4222}
4223
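/* Walk a region of on-chip memory through the indirect register
 * interface, writing each test pattern to every dword and verifying
 * the value reads back correctly.
 */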
4224static int
4225bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4226{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004227 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004228 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4229 int i;
4230
4231 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4232 u32 offset;
4233
4234 for (offset = 0; offset < size; offset += 4) {
4235
4236 REG_WR_IND(bp, start + offset, test_pattern[i]);
4237
4238 if (REG_RD_IND(bp, start + offset) !=
4239 test_pattern[i]) {
4240 return -ENODEV;
4241 }
4242 }
4243 }
4244 return 0;
4245}
4246
4247static int
4248bnx2_test_memory(struct bnx2 *bp)
4249{
4250 int ret = 0;
4251 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004252 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004253 u32 offset;
4254 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004255 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004256 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004257 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004258 { 0xe0000, 0x4000 },
4259 { 0x120000, 0x4000 },
4260 { 0x1a0000, 0x4000 },
4261 { 0x160000, 0x4000 },
4262 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004263 },
4264 mem_tbl_5709[] = {
4265 { 0x60000, 0x4000 },
4266 { 0xa0000, 0x3000 },
4267 { 0xe0000, 0x4000 },
4268 { 0x120000, 0x4000 },
4269 { 0x1a0000, 0x4000 },
4270 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004271 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004272 struct mem_entry *mem_tbl;
4273
4274 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4275 mem_tbl = mem_tbl_5709;
4276 else
4277 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004278
4279 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4280 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4281 mem_tbl[i].len)) != 0) {
4282 return ret;
4283 }
4284 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004285
Michael Chanb6016b72005-05-26 13:03:09 -07004286 return ret;
4287}
4288
Michael Chanbc5a0692006-01-23 16:13:22 -08004289#define BNX2_MAC_LOOPBACK 0
4290#define BNX2_PHY_LOOPBACK 1
4291
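/* Run one loopback iteration in either MAC or PHY loopback mode: build
 * a 1514-byte frame addressed to the device's own MAC, post it on the
 * TX ring, force an immediate coalescing pass with COAL_NOW_WO_INT,
 * then verify that exactly one frame came back with a clean l2_fhdr
 * status, the expected length and an intact payload.
 */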
Michael Chanb6016b72005-05-26 13:03:09 -07004292static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004293bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004294{
4295 unsigned int pkt_size, num_pkts, i;
4296 struct sk_buff *skb, *rx_skb;
4297 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004298 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004299 dma_addr_t map;
4300 struct tx_bd *txbd;
4301 struct sw_bd *rx_buf;
4302 struct l2_fhdr *rx_hdr;
4303 int ret = -ENODEV;
4304
Michael Chanbc5a0692006-01-23 16:13:22 -08004305 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4306 bp->loopback = MAC_LOOPBACK;
4307 bnx2_set_mac_loopback(bp);
4308 }
4309 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004310 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004311 bnx2_set_phy_loopback(bp);
4312 }
4313 else
4314 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004315
4316 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004317 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004318 if (!skb)
4319 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004320 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004321 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004322 memset(packet + 6, 0x0, 8);
4323 for (i = 14; i < pkt_size; i++)
4324 packet[i] = (unsigned char) (i & 0xff);
4325
4326 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4327 PCI_DMA_TODEVICE);
4328
Michael Chanbf5295b2006-03-23 01:11:56 -08004329 REG_WR(bp, BNX2_HC_COMMAND,
4330 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4331
Michael Chanb6016b72005-05-26 13:03:09 -07004332 REG_RD(bp, BNX2_HC_COMMAND);
4333
4334 udelay(5);
4335 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4336
Michael Chanb6016b72005-05-26 13:03:09 -07004337 num_pkts = 0;
4338
Michael Chanbc5a0692006-01-23 16:13:22 -08004339 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004340
4341 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4342 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4343 txbd->tx_bd_mss_nbytes = pkt_size;
4344 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4345
4346 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004347 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4348 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004349
Michael Chan234754d2006-11-19 14:11:41 -08004350 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4351 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004352
4353 udelay(100);
4354
Michael Chanbf5295b2006-03-23 01:11:56 -08004355 REG_WR(bp, BNX2_HC_COMMAND,
4356 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4357
Michael Chanb6016b72005-05-26 13:03:09 -07004358 REG_RD(bp, BNX2_HC_COMMAND);
4359
4360 udelay(5);
4361
4362 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004363 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004364
Michael Chanbc5a0692006-01-23 16:13:22 -08004365 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004366 goto loopback_test_done;
4367 }
4368
4369 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4370 if (rx_idx != rx_start_idx + num_pkts) {
4371 goto loopback_test_done;
4372 }
4373
4374 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4375 rx_skb = rx_buf->skb;
4376
4377 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4378 skb_reserve(rx_skb, bp->rx_offset);
4379
4380 pci_dma_sync_single_for_cpu(bp->pdev,
4381 pci_unmap_addr(rx_buf, mapping),
4382 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4383
Michael Chanade2bfe2006-01-23 16:09:51 -08004384 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004385 (L2_FHDR_ERRORS_BAD_CRC |
4386 L2_FHDR_ERRORS_PHY_DECODE |
4387 L2_FHDR_ERRORS_ALIGNMENT |
4388 L2_FHDR_ERRORS_TOO_SHORT |
4389 L2_FHDR_ERRORS_GIANT_FRAME)) {
4390
4391 goto loopback_test_done;
4392 }
4393
4394 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4395 goto loopback_test_done;
4396 }
4397
4398 for (i = 14; i < pkt_size; i++) {
4399 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4400 goto loopback_test_done;
4401 }
4402 }
4403
4404 ret = 0;
4405
4406loopback_test_done:
4407 bp->loopback = 0;
4408 return ret;
4409}
4410
Michael Chanbc5a0692006-01-23 16:13:22 -08004411#define BNX2_MAC_LOOPBACK_FAILED 1
4412#define BNX2_PHY_LOOPBACK_FAILED 2
4413#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4414 BNX2_PHY_LOOPBACK_FAILED)
4415
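/* Run both MAC and PHY loopback tests after a fresh NIC reset and
 * return a bitmask of the modes that failed.
 */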
4416static int
4417bnx2_test_loopback(struct bnx2 *bp)
4418{
4419 int rc = 0;
4420
4421 if (!netif_running(bp->dev))
4422 return BNX2_LOOPBACK_FAILED;
4423
4424 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4425 spin_lock_bh(&bp->phy_lock);
4426 bnx2_init_phy(bp);
4427 spin_unlock_bh(&bp->phy_lock);
4428 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4429 rc |= BNX2_MAC_LOOPBACK_FAILED;
4430 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4431 rc |= BNX2_PHY_LOOPBACK_FAILED;
4432 return rc;
4433}
4434
Michael Chanb6016b72005-05-26 13:03:09 -07004435#define NVRAM_SIZE 0x200
4436#define CRC32_RESIDUAL 0xdebb20e3
4437
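/* NVRAM self-test: check the 0x669955aa magic value at offset 0 and
 * verify that the two 256-byte blocks starting at offset 0x100 each
 * produce the expected CRC32 residual.
 */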
4438static int
4439bnx2_test_nvram(struct bnx2 *bp)
4440{
4441 u32 buf[NVRAM_SIZE / 4];
4442 u8 *data = (u8 *) buf;
4443 int rc = 0;
4444 u32 magic, csum;
4445
4446 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4447 goto test_nvram_done;
4448
4449 magic = be32_to_cpu(buf[0]);
4450 if (magic != 0x669955aa) {
4451 rc = -ENODEV;
4452 goto test_nvram_done;
4453 }
4454
4455 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4456 goto test_nvram_done;
4457
4458 csum = ether_crc_le(0x100, data);
4459 if (csum != CRC32_RESIDUAL) {
4460 rc = -ENODEV;
4461 goto test_nvram_done;
4462 }
4463
4464 csum = ether_crc_le(0x100, data + 0x100);
4465 if (csum != CRC32_RESIDUAL) {
4466 rc = -ENODEV;
4467 }
4468
4469test_nvram_done:
4470 return rc;
4471}
4472
4473static int
4474bnx2_test_link(struct bnx2 *bp)
4475{
4476 u32 bmsr;
4477
Michael Chanc770a652005-08-25 15:38:39 -07004478 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004479 bnx2_enable_bmsr1(bp);
4480 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4481 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4482 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004483 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004484
Michael Chanb6016b72005-05-26 13:03:09 -07004485 if (bmsr & BMSR_LSTATUS) {
4486 return 0;
4487 }
4488 return -ENODEV;
4489}
4490
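/* Interrupt self-test: force an interrupt with COAL_NOW and poll for
 * up to roughly 100 ms for the status index in PCICFG_INT_ACK_CMD to
 * advance.  Used to decide whether MSI is actually being delivered.
 */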
4491static int
4492bnx2_test_intr(struct bnx2 *bp)
4493{
4494 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004495 u16 status_idx;
4496
4497 if (!netif_running(bp->dev))
4498 return -ENODEV;
4499
4500 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4501
4502 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004503 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004504 REG_RD(bp, BNX2_HC_COMMAND);
4505
4506 for (i = 0; i < 10; i++) {
4507 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4508 status_idx) {
4509
4510 break;
4511 }
4512
4513 msleep_interruptible(10);
4514 }
4515 if (i < 10)
4516 return 0;
4517
4518 return -ENODEV;
4519}
4520
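/* 5706 SerDes link work-around, run from the periodic timer.  When
 * autonegotiation is enabled but the link stays down and the PHY
 * reports signal detect without receiving configuration, the link is
 * forced to 1G full duplex (parallel detect).  If configuration later
 * appears while in that state, autonegotiation is re-enabled.
 */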
4521static void
Michael Chan48b01e22006-11-19 14:08:00 -08004522bnx2_5706_serdes_timer(struct bnx2 *bp)
4523{
4524 spin_lock(&bp->phy_lock);
4525 if (bp->serdes_an_pending)
4526 bp->serdes_an_pending--;
4527 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4528 u32 bmcr;
4529
4530 bp->current_interval = bp->timer_interval;
4531
Michael Chanca58c3a2007-05-03 13:22:52 -07004532 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004533
4534 if (bmcr & BMCR_ANENABLE) {
4535 u32 phy1, phy2;
4536
4537 bnx2_write_phy(bp, 0x1c, 0x7c00);
4538 bnx2_read_phy(bp, 0x1c, &phy1);
4539
4540 bnx2_write_phy(bp, 0x17, 0x0f01);
4541 bnx2_read_phy(bp, 0x15, &phy2);
4542 bnx2_write_phy(bp, 0x17, 0x0f01);
4543 bnx2_read_phy(bp, 0x15, &phy2);
4544
4545 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4546 !(phy2 & 0x20)) { /* no CONFIG */
4547
4548 bmcr &= ~BMCR_ANENABLE;
4549 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004550 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004551 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4552 }
4553 }
4554 }
4555 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4556 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4557 u32 phy2;
4558
4559 bnx2_write_phy(bp, 0x17, 0x0f01);
4560 bnx2_read_phy(bp, 0x15, &phy2);
4561 if (phy2 & 0x20) {
4562 u32 bmcr;
4563
Michael Chanca58c3a2007-05-03 13:22:52 -07004564 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004565 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004566 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004567
4568 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4569 }
4570 } else
4571 bp->current_interval = bp->timer_interval;
4572
4573 spin_unlock(&bp->phy_lock);
4574}
4575
4576static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004577bnx2_5708_serdes_timer(struct bnx2 *bp)
4578{
4579 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4580 bp->serdes_an_pending = 0;
4581 return;
4582 }
4583
4584 spin_lock(&bp->phy_lock);
4585 if (bp->serdes_an_pending)
4586 bp->serdes_an_pending--;
4587 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4588 u32 bmcr;
4589
Michael Chanca58c3a2007-05-03 13:22:52 -07004590 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004591 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004592 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004593 bp->current_interval = SERDES_FORCED_TIMEOUT;
4594 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004595 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004596 bp->serdes_an_pending = 2;
4597 bp->current_interval = bp->timer_interval;
4598 }
4599
4600 } else
4601 bp->current_interval = bp->timer_interval;
4602
4603 spin_unlock(&bp->phy_lock);
4604}
4605
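/* Periodic driver timer: writes the driver pulse sequence to shared
 * memory (firmware heartbeat), picks up the firmware RX drop counter,
 * runs the SerDes work-arounds, and re-arms itself at
 * current_interval.
 */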
4606static void
Michael Chanb6016b72005-05-26 13:03:09 -07004607bnx2_timer(unsigned long data)
4608{
4609 struct bnx2 *bp = (struct bnx2 *) data;
4610 u32 msg;
4611
Michael Chancd339a02005-08-25 15:35:24 -07004612 if (!netif_running(bp->dev))
4613 return;
4614
Michael Chanb6016b72005-05-26 13:03:09 -07004615 if (atomic_read(&bp->intr_sem) != 0)
4616 goto bnx2_restart_timer;
4617
4618 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004619 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004620
Michael Chancea94db2006-06-12 22:16:13 -07004621 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4622
Michael Chanf8dd0642006-11-19 14:08:29 -08004623 if (bp->phy_flags & PHY_SERDES_FLAG) {
4624 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4625 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004626 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004627 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004628 }
4629
4630bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004631 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004632}
4633
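/* Request the device interrupt: MSI when enabled (using the one-shot
 * handler on the 5709), otherwise a shared INTx line.
 */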
Michael Chan8e6a72c2007-05-03 13:24:48 -07004634static int
4635bnx2_request_irq(struct bnx2 *bp)
4636{
4637 struct net_device *dev = bp->dev;
4638 int rc = 0;
4639
4640 if (bp->flags & USING_MSI_FLAG) {
4641 irq_handler_t fn = bnx2_msi;
4642
4643 if (bp->flags & ONE_SHOT_MSI_FLAG)
4644 fn = bnx2_msi_1shot;
4645
4646 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4647 } else
4648 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4649 IRQF_SHARED, dev->name, dev);
4650 return rc;
4651}
4652
4653static void
4654bnx2_free_irq(struct bnx2 *bp)
4655{
4656 struct net_device *dev = bp->dev;
4657
4658 if (bp->flags & USING_MSI_FLAG) {
4659 free_irq(bp->pdev->irq, dev);
4660 pci_disable_msi(bp->pdev);
4661 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4662 } else
4663 free_irq(bp->pdev->irq, dev);
4664}
4665
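/* Open the interface: allocate rings and status blocks, enable MSI if
 * the device and the disable_msi module parameter allow it, bring up
 * the NIC, and then use bnx2_test_intr() to verify that MSI interrupts
 * are really being delivered, falling back to INTx if not.
 */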
Michael Chanb6016b72005-05-26 13:03:09 -07004666/* Called with rtnl_lock */
4667static int
4668bnx2_open(struct net_device *dev)
4669{
Michael Chan972ec0d2006-01-23 16:12:43 -08004670 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004671 int rc;
4672
Michael Chan1b2f9222007-05-03 13:20:19 -07004673 netif_carrier_off(dev);
4674
Pavel Machek829ca9a2005-09-03 15:56:56 -07004675 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004676 bnx2_disable_int(bp);
4677
4678 rc = bnx2_alloc_mem(bp);
4679 if (rc)
4680 return rc;
4681
Michael Chan8e6a72c2007-05-03 13:24:48 -07004682 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07004683 if (pci_enable_msi(bp->pdev) == 0) {
4684 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07004685 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4686 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07004687 }
4688 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07004689 rc = bnx2_request_irq(bp);
4690
Michael Chanb6016b72005-05-26 13:03:09 -07004691 if (rc) {
4692 bnx2_free_mem(bp);
4693 return rc;
4694 }
4695
4696 rc = bnx2_init_nic(bp);
4697
4698 if (rc) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004699 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004700 bnx2_free_skbs(bp);
4701 bnx2_free_mem(bp);
4702 return rc;
4703 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004704
Michael Chancd339a02005-08-25 15:35:24 -07004705 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004706
4707 atomic_set(&bp->intr_sem, 0);
4708
4709 bnx2_enable_int(bp);
4710
4711 if (bp->flags & USING_MSI_FLAG) {
4712		/* Test MSI to make sure it is working.
4713		 * If the MSI test fails, go back to INTx mode.
4714 */
4715 if (bnx2_test_intr(bp) != 0) {
4716 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4717 " using MSI, switching to INTx mode. Please"
4718 " report this failure to the PCI maintainer"
4719 " and include system chipset information.\n",
4720 bp->dev->name);
4721
4722 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07004723 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004724
4725 rc = bnx2_init_nic(bp);
4726
Michael Chan8e6a72c2007-05-03 13:24:48 -07004727 if (!rc)
4728 rc = bnx2_request_irq(bp);
4729
Michael Chanb6016b72005-05-26 13:03:09 -07004730 if (rc) {
4731 bnx2_free_skbs(bp);
4732 bnx2_free_mem(bp);
4733 del_timer_sync(&bp->timer);
4734 return rc;
4735 }
4736 bnx2_enable_int(bp);
4737 }
4738 }
4739 if (bp->flags & USING_MSI_FLAG) {
4740 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4741 }
4742
4743 netif_start_queue(dev);
4744
4745 return 0;
4746}
4747
4748static void
David Howellsc4028952006-11-22 14:57:56 +00004749bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004750{
David Howellsc4028952006-11-22 14:57:56 +00004751 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004752
Michael Chanafdc08b2005-08-25 15:34:29 -07004753 if (!netif_running(bp->dev))
4754 return;
4755
4756 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004757 bnx2_netif_stop(bp);
4758
4759 bnx2_init_nic(bp);
4760
4761 atomic_set(&bp->intr_sem, 1);
4762 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004763 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004764}
4765
4766static void
4767bnx2_tx_timeout(struct net_device *dev)
4768{
Michael Chan972ec0d2006-01-23 16:12:43 -08004769 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004770
4771 /* This allows the netif to be shutdown gracefully before resetting */
4772 schedule_work(&bp->reset_task);
4773}
4774
4775#ifdef BCM_VLAN
4776/* Called with rtnl_lock */
4777static void
4778bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4779{
Michael Chan972ec0d2006-01-23 16:12:43 -08004780 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004781
4782 bnx2_netif_stop(bp);
4783
4784 bp->vlgrp = vlgrp;
4785 bnx2_set_rx_mode(dev);
4786
4787 bnx2_netif_start(bp);
4788}
4789
4790/* Called with rtnl_lock */
4791static void
4792bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4793{
Michael Chan972ec0d2006-01-23 16:12:43 -08004794 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004795
4796 bnx2_netif_stop(bp);
Dan Aloni5c15bde2007-03-02 20:44:51 -08004797 vlan_group_set_device(bp->vlgrp, vid, NULL);
Michael Chanb6016b72005-05-26 13:03:09 -07004798 bnx2_set_rx_mode(dev);
4799
4800 bnx2_netif_start(bp);
4801}
4802#endif
4803
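/* hard_start_xmit handler.  Maps the SKB head and fragments into TX
 * BDs and sets the checksum/VLAN/LSO flags.  For TSO, IPv4 packets get
 * the pseudo-header checksum seeded here, while TCPv6 packets encode
 * the transport-header offset into the BD flags.  The queue is stopped
 * when fewer than MAX_SKB_FRAGS + 1 descriptors remain.
 */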
Herbert Xu932ff272006-06-09 12:20:56 -07004804/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004805 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4806 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004807 */
4808static int
4809bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4810{
Michael Chan972ec0d2006-01-23 16:12:43 -08004811 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004812 dma_addr_t mapping;
4813 struct tx_bd *txbd;
4814 struct sw_bd *tx_buf;
4815 u32 len, vlan_tag_flags, last_frag, mss;
4816 u16 prod, ring_prod;
4817 int i;
4818
Michael Chane89bbf12005-08-25 15:36:58 -07004819 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004820 netif_stop_queue(dev);
4821 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4822 dev->name);
4823
4824 return NETDEV_TX_BUSY;
4825 }
4826 len = skb_headlen(skb);
4827 prod = bp->tx_prod;
4828 ring_prod = TX_RING_IDX(prod);
4829
4830 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004831 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004832 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4833 }
4834
4835 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4836 vlan_tag_flags |=
4837 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4838 }
Herbert Xu79671682006-06-22 02:40:14 -07004839 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004840 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4841 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004842 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004843
Michael Chanb6016b72005-05-26 13:03:09 -07004844 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4845
Michael Chan4666f872007-05-03 13:22:28 -07004846 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004847
Michael Chan4666f872007-05-03 13:22:28 -07004848 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4849 u32 tcp_off = skb_transport_offset(skb) -
4850 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004851
Michael Chan4666f872007-05-03 13:22:28 -07004852 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4853 TX_BD_FLAGS_SW_FLAGS;
4854 if (likely(tcp_off == 0))
4855 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4856 else {
4857 tcp_off >>= 3;
4858 vlan_tag_flags |= ((tcp_off & 0x3) <<
4859 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4860 ((tcp_off & 0x10) <<
4861 TX_BD_FLAGS_TCP6_OFF4_SHL);
4862 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4863 }
4864 } else {
4865 if (skb_header_cloned(skb) &&
4866 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4867 dev_kfree_skb(skb);
4868 return NETDEV_TX_OK;
4869 }
4870
4871 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4872
4873 iph = ip_hdr(skb);
4874 iph->check = 0;
4875 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4876 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4877 iph->daddr, 0,
4878 IPPROTO_TCP,
4879 0);
4880 if (tcp_opt_len || (iph->ihl > 5)) {
4881 vlan_tag_flags |= ((iph->ihl - 5) +
4882 (tcp_opt_len >> 2)) << 8;
4883 }
Michael Chanb6016b72005-05-26 13:03:09 -07004884 }
Michael Chan4666f872007-05-03 13:22:28 -07004885 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004886 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004887
4888 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004889
Michael Chanb6016b72005-05-26 13:03:09 -07004890 tx_buf = &bp->tx_buf_ring[ring_prod];
4891 tx_buf->skb = skb;
4892 pci_unmap_addr_set(tx_buf, mapping, mapping);
4893
4894 txbd = &bp->tx_desc_ring[ring_prod];
4895
4896 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4897 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4898 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4899 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4900
4901 last_frag = skb_shinfo(skb)->nr_frags;
4902
4903 for (i = 0; i < last_frag; i++) {
4904 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4905
4906 prod = NEXT_TX_BD(prod);
4907 ring_prod = TX_RING_IDX(prod);
4908 txbd = &bp->tx_desc_ring[ring_prod];
4909
4910 len = frag->size;
4911 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4912 len, PCI_DMA_TODEVICE);
4913 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4914 mapping, mapping);
4915
4916 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4917 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4918 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4919 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4920
4921 }
4922 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4923
4924 prod = NEXT_TX_BD(prod);
4925 bp->tx_prod_bseq += skb->len;
4926
Michael Chan234754d2006-11-19 14:11:41 -08004927 REG_WR16(bp, bp->tx_bidx_addr, prod);
4928 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004929
4930 mmiowb();
4931
4932 bp->tx_prod = prod;
4933 dev->trans_start = jiffies;
4934
Michael Chane89bbf12005-08-25 15:36:58 -07004935 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004936 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004937 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004938 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004939 }
4940
4941 return NETDEV_TX_OK;
4942}
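/*
 * Illustrative sketch only -- not used by the driver.  bnx2_start_xmit()
 * above advances the producer with NEXT_TX_BD()/TX_RING_IDX(); the idea is
 * plain power-of-two ring arithmetic: the producer counter runs freely and
 * is masked down to a ring slot when a descriptor is actually written.  The
 * real macros additionally skip the last descriptor of each ring page,
 * which the hardware uses as a chain pointer; that detail is omitted here.
 * All demo_* names and DEMO_RING_SIZE are hypothetical.
 */
#define DEMO_RING_SIZE	256			/* must be a power of two */

static inline u16 demo_next_prod(u16 prod)
{
	return prod + 1;			/* free-running counter */
}

static inline u16 demo_ring_idx(u16 prod)
{
	return prod & (DEMO_RING_SIZE - 1);	/* slot actually written */
}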
4943
4944/* Called with rtnl_lock */
4945static int
4946bnx2_close(struct net_device *dev)
4947{
Michael Chan972ec0d2006-01-23 16:12:43 -08004948 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004949 u32 reset_code;
4950
Michael Chanafdc08b2005-08-25 15:34:29 -07004951 /* Calling flush_scheduled_work() may deadlock because
4952 * linkwatch_event() may be on the workqueue and it will try to get
4953 * the rtnl_lock which we are holding.
4954 */
4955 while (bp->in_reset_task)
4956 msleep(1);
4957
Michael Chanb6016b72005-05-26 13:03:09 -07004958 bnx2_netif_stop(bp);
4959 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004960 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004961 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004962 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004963 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4964 else
4965 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4966 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07004967 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004968 bnx2_free_skbs(bp);
4969 bnx2_free_mem(bp);
4970 bp->link_up = 0;
4971 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004972 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004973 return 0;
4974}
4975
4976#define GET_NET_STATS64(ctr) \
4977 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4978 (unsigned long) (ctr##_lo)
4979
4980#define GET_NET_STATS32(ctr) \
4981 (ctr##_lo)
4982
4983#if (BITS_PER_LONG == 64)
4984#define GET_NET_STATS GET_NET_STATS64
4985#else
4986#define GET_NET_STATS GET_NET_STATS32
4987#endif
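/*
 * Illustrative sketch only -- not part of the driver.  GET_NET_STATS64
 * above token-pastes "_hi"/"_lo" onto a counter name and rebuilds the
 * 64-bit value that the chip keeps as two 32-bit halves; on 32-bit hosts
 * the struct net_device_stats fields are only unsigned long, so
 * GET_NET_STATS32 keeps just the low half.  The same reconstruction,
 * open-coded with hypothetical arguments:
 */
static inline u64 demo_stat64(u32 ctr_hi, u32 ctr_lo)
{
	return ((u64) ctr_hi << 32) + ctr_lo;
}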
4988
4989static struct net_device_stats *
4990bnx2_get_stats(struct net_device *dev)
4991{
Michael Chan972ec0d2006-01-23 16:12:43 -08004992 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004993 struct statistics_block *stats_blk = bp->stats_blk;
4994 struct net_device_stats *net_stats = &bp->net_stats;
4995
4996 if (bp->stats_blk == NULL) {
4997 return net_stats;
4998 }
4999 net_stats->rx_packets =
5000 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5001 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5002 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5003
5004 net_stats->tx_packets =
5005 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5006 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5007 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5008
5009 net_stats->rx_bytes =
5010 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5011
5012 net_stats->tx_bytes =
5013 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5014
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005015 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005016 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5017
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005018 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005019 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5020
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005021 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005022 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5023 stats_blk->stat_EtherStatsOverrsizePkts);
5024
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005025 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005026 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5027
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005028 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005029 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5030
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005031 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005032 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5033
5034 net_stats->rx_errors = net_stats->rx_length_errors +
5035 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5036 net_stats->rx_crc_errors;
5037
5038 net_stats->tx_aborted_errors =
5039 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5040 stats_blk->stat_Dot3StatsLateCollisions);
5041
Michael Chan5b0c76a2005-11-04 08:45:49 -08005042 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5043 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005044 net_stats->tx_carrier_errors = 0;
5045 else {
5046 net_stats->tx_carrier_errors =
5047 (unsigned long)
5048 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5049 }
5050
5051 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005052 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005053 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5054 +
5055 net_stats->tx_aborted_errors +
5056 net_stats->tx_carrier_errors;
5057
Michael Chancea94db2006-06-12 22:16:13 -07005058 net_stats->rx_missed_errors =
5059 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5060 stats_blk->stat_FwRxDrop);
5061
Michael Chanb6016b72005-05-26 13:03:09 -07005062 return net_stats;
5063}
5064
5065/* All ethtool functions called with rtnl_lock */
5066
5067static int
5068bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5069{
Michael Chan972ec0d2006-01-23 16:12:43 -08005070 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005071
5072 cmd->supported = SUPPORTED_Autoneg;
5073 if (bp->phy_flags & PHY_SERDES_FLAG) {
5074 cmd->supported |= SUPPORTED_1000baseT_Full |
5075 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005076 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5077 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005078
5079 cmd->port = PORT_FIBRE;
5080 }
5081 else {
5082 cmd->supported |= SUPPORTED_10baseT_Half |
5083 SUPPORTED_10baseT_Full |
5084 SUPPORTED_100baseT_Half |
5085 SUPPORTED_100baseT_Full |
5086 SUPPORTED_1000baseT_Full |
5087 SUPPORTED_TP;
5088
5089 cmd->port = PORT_TP;
5090 }
5091
5092 cmd->advertising = bp->advertising;
5093
5094 if (bp->autoneg & AUTONEG_SPEED) {
5095 cmd->autoneg = AUTONEG_ENABLE;
5096 }
5097 else {
5098 cmd->autoneg = AUTONEG_DISABLE;
5099 }
5100
5101 if (netif_carrier_ok(dev)) {
5102 cmd->speed = bp->line_speed;
5103 cmd->duplex = bp->duplex;
5104 }
5105 else {
5106 cmd->speed = -1;
5107 cmd->duplex = -1;
5108 }
5109
5110 cmd->transceiver = XCVR_INTERNAL;
5111 cmd->phy_address = bp->phy_addr;
5112
5113 return 0;
5114}
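/*
 * Userspace sketch (not part of this file): bnx2_get_settings() above is
 * what ends up servicing the legacy ETHTOOL_GSET ioctl.  A minimal query
 * from user space looks roughly like this; "eth0" is a placeholder
 * interface name and error handling is kept to a minimum.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *) &ecmd;
 *		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
 *			perror("ETHTOOL_GSET");
 *			return 1;
 *		}
 *		printf("speed %u duplex %u autoneg %u\n",
 *		       ecmd.speed, ecmd.duplex, ecmd.autoneg);
 *		return 0;
 *	}
 */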
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005115
Michael Chanb6016b72005-05-26 13:03:09 -07005116static int
5117bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5118{
Michael Chan972ec0d2006-01-23 16:12:43 -08005119 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005120 u8 autoneg = bp->autoneg;
5121 u8 req_duplex = bp->req_duplex;
5122 u16 req_line_speed = bp->req_line_speed;
5123 u32 advertising = bp->advertising;
5124
5125 if (cmd->autoneg == AUTONEG_ENABLE) {
5126 autoneg |= AUTONEG_SPEED;
5127
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005128 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005129
5130 /* allow advertising 1 speed */
5131 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5132 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5133 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5134 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5135
5136 if (bp->phy_flags & PHY_SERDES_FLAG)
5137 return -EINVAL;
5138
5139 advertising = cmd->advertising;
5140
Michael Chan27a005b2007-05-03 13:23:41 -07005141 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5142 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5143 return -EINVAL;
5144 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005145 advertising = cmd->advertising;
5146 }
5147 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5148 return -EINVAL;
5149 }
5150 else {
5151 if (bp->phy_flags & PHY_SERDES_FLAG) {
5152 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5153 }
5154 else {
5155 advertising = ETHTOOL_ALL_COPPER_SPEED;
5156 }
5157 }
5158 advertising |= ADVERTISED_Autoneg;
5159 }
5160 else {
5161 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005162 if ((cmd->speed != SPEED_1000 &&
5163 cmd->speed != SPEED_2500) ||
5164 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005165 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005166
5167 if (cmd->speed == SPEED_2500 &&
5168 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5169 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005170 }
5171 else if (cmd->speed == SPEED_1000) {
5172 return -EINVAL;
5173 }
5174 autoneg &= ~AUTONEG_SPEED;
5175 req_line_speed = cmd->speed;
5176 req_duplex = cmd->duplex;
5177 advertising = 0;
5178 }
5179
5180 bp->autoneg = autoneg;
5181 bp->advertising = advertising;
5182 bp->req_line_speed = req_line_speed;
5183 bp->req_duplex = req_duplex;
5184
Michael Chanc770a652005-08-25 15:38:39 -07005185 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005186
5187 bnx2_setup_phy(bp);
5188
Michael Chanc770a652005-08-25 15:38:39 -07005189 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005190
5191 return 0;
5192}
5193
5194static void
5195bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5196{
Michael Chan972ec0d2006-01-23 16:12:43 -08005197 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005198
5199 strcpy(info->driver, DRV_MODULE_NAME);
5200 strcpy(info->version, DRV_MODULE_VERSION);
5201 strcpy(info->bus_info, pci_name(bp->pdev));
5202 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5203 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5204 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005205 info->fw_version[1] = info->fw_version[3] = '.';
5206 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005207}
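/*
 * Sketch only -- not used by the driver.  bnx2_get_drvinfo() above unpacks
 * bp->fw_ver, which appears to hold one small version component per byte
 * (bits 31-24, 23-16 and 15-8), into a dotted string; the '+ '0'' trick
 * works because each component is a single decimal digit.  A more general
 * form of the same unpacking, with a hypothetical helper name:
 */
static void demo_fw_ver_string(u32 fw_ver, char *buf, int len)
{
	snprintf(buf, len, "%u.%u.%u",
		 (fw_ver >> 24) & 0xff, (fw_ver >> 16) & 0xff,
		 (fw_ver >> 8) & 0xff);
}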
5208
Michael Chan244ac4f2006-03-20 17:48:46 -08005209#define BNX2_REGDUMP_LEN (32 * 1024)
5210
5211static int
5212bnx2_get_regs_len(struct net_device *dev)
5213{
5214 return BNX2_REGDUMP_LEN;
5215}
5216
5217static void
5218bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5219{
5220 u32 *p = _p, i, offset;
5221 u8 *orig_p = _p;
5222 struct bnx2 *bp = netdev_priv(dev);
5223 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5224 0x0800, 0x0880, 0x0c00, 0x0c10,
5225 0x0c30, 0x0d08, 0x1000, 0x101c,
5226 0x1040, 0x1048, 0x1080, 0x10a4,
5227 0x1400, 0x1490, 0x1498, 0x14f0,
5228 0x1500, 0x155c, 0x1580, 0x15dc,
5229 0x1600, 0x1658, 0x1680, 0x16d8,
5230 0x1800, 0x1820, 0x1840, 0x1854,
5231 0x1880, 0x1894, 0x1900, 0x1984,
5232 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5233 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5234 0x2000, 0x2030, 0x23c0, 0x2400,
5235 0x2800, 0x2820, 0x2830, 0x2850,
5236 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5237 0x3c00, 0x3c94, 0x4000, 0x4010,
5238 0x4080, 0x4090, 0x43c0, 0x4458,
5239 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5240 0x4fc0, 0x5010, 0x53c0, 0x5444,
5241 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5242 0x5fc0, 0x6000, 0x6400, 0x6428,
5243 0x6800, 0x6848, 0x684c, 0x6860,
5244 0x6888, 0x6910, 0x8000 };
5245
5246 regs->version = 0;
5247
5248 memset(p, 0, BNX2_REGDUMP_LEN);
5249
5250 if (!netif_running(bp->dev))
5251 return;
5252
5253 i = 0;
5254 offset = reg_boundaries[0];
5255 p += offset;
5256 while (offset < BNX2_REGDUMP_LEN) {
5257 *p++ = REG_RD(bp, offset);
5258 offset += 4;
5259 if (offset == reg_boundaries[i + 1]) {
5260 offset = reg_boundaries[i + 2];
5261 p = (u32 *) (orig_p + offset);
5262 i += 2;
5263 }
5264 }
5265}
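/*
 * Sketch only -- not used by the driver.  reg_boundaries[] in
 * bnx2_get_regs() above is a list of alternating window start/end offsets,
 * terminated by BNX2_REGDUMP_LEN; registers inside a window are read and
 * the gaps between windows stay zeroed in the dump buffer.  The same walk,
 * with a hypothetical read callback instead of REG_RD():
 */
static void demo_dump_windows(u32 *dump, const u32 *bounds, u32 limit,
			      u32 (*read_reg)(void *ctx, u32 off), void *ctx)
{
	u32 off = bounds[0];
	int i = 0;

	while (off < limit) {
		dump[off / 4] = read_reg(ctx, off);
		off += 4;
		if (off == bounds[i + 1]) {	/* end of this window */
			off = bounds[i + 2];	/* start of the next one */
			i += 2;
		}
	}
}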
5266
Michael Chanb6016b72005-05-26 13:03:09 -07005267static void
5268bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5269{
Michael Chan972ec0d2006-01-23 16:12:43 -08005270 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005271
5272 if (bp->flags & NO_WOL_FLAG) {
5273 wol->supported = 0;
5274 wol->wolopts = 0;
5275 }
5276 else {
5277 wol->supported = WAKE_MAGIC;
5278 if (bp->wol)
5279 wol->wolopts = WAKE_MAGIC;
5280 else
5281 wol->wolopts = 0;
5282 }
5283 memset(&wol->sopass, 0, sizeof(wol->sopass));
5284}
5285
5286static int
5287bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5288{
Michael Chan972ec0d2006-01-23 16:12:43 -08005289 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005290
5291 if (wol->wolopts & ~WAKE_MAGIC)
5292 return -EINVAL;
5293
5294 if (wol->wolopts & WAKE_MAGIC) {
5295 if (bp->flags & NO_WOL_FLAG)
5296 return -EINVAL;
5297
5298 bp->wol = 1;
5299 }
5300 else {
5301 bp->wol = 0;
5302 }
5303 return 0;
5304}
5305
5306static int
5307bnx2_nway_reset(struct net_device *dev)
5308{
Michael Chan972ec0d2006-01-23 16:12:43 -08005309 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005310 u32 bmcr;
5311
5312 if (!(bp->autoneg & AUTONEG_SPEED)) {
5313 return -EINVAL;
5314 }
5315
Michael Chanc770a652005-08-25 15:38:39 -07005316 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005317
5318 /* Force a link down visible on the other side */
5319 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005320 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005321 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005322
5323 msleep(20);
5324
Michael Chanc770a652005-08-25 15:38:39 -07005325 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005326
5327 bp->current_interval = SERDES_AN_TIMEOUT;
5328 bp->serdes_an_pending = 1;
5329 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005330 }
5331
Michael Chanca58c3a2007-05-03 13:22:52 -07005332 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005333 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005334 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005335
Michael Chanc770a652005-08-25 15:38:39 -07005336 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005337
5338 return 0;
5339}
5340
5341static int
5342bnx2_get_eeprom_len(struct net_device *dev)
5343{
Michael Chan972ec0d2006-01-23 16:12:43 -08005344 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005345
Michael Chan1122db72006-01-23 16:11:42 -08005346 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005347 return 0;
5348
Michael Chan1122db72006-01-23 16:11:42 -08005349 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005350}
5351
5352static int
5353bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5354 u8 *eebuf)
5355{
Michael Chan972ec0d2006-01-23 16:12:43 -08005356 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005357 int rc;
5358
John W. Linville1064e942005-11-10 12:58:24 -08005359 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005360
5361 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5362
5363 return rc;
5364}
5365
5366static int
5367bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5368 u8 *eebuf)
5369{
Michael Chan972ec0d2006-01-23 16:12:43 -08005370 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005371 int rc;
5372
John W. Linville1064e942005-11-10 12:58:24 -08005373 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005374
5375 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5376
5377 return rc;
5378}
5379
5380static int
5381bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5382{
Michael Chan972ec0d2006-01-23 16:12:43 -08005383 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005384
5385 memset(coal, 0, sizeof(struct ethtool_coalesce));
5386
5387 coal->rx_coalesce_usecs = bp->rx_ticks;
5388 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5389 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5390 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5391
5392 coal->tx_coalesce_usecs = bp->tx_ticks;
5393 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5394 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5395 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5396
5397 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5398
5399 return 0;
5400}
5401
5402static int
5403bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5404{
Michael Chan972ec0d2006-01-23 16:12:43 -08005405 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005406
5407 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5408 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5409
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005410 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005411 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5412
5413 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5414 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5415
5416 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5417 if (bp->rx_quick_cons_trip_int > 0xff)
5418 bp->rx_quick_cons_trip_int = 0xff;
5419
5420 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5421 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5422
5423 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5424 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5425
5426 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5427 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5428
5429 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5430 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5431 0xff;
5432
5433 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5434 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5435 bp->stats_ticks &= 0xffff00;
5436
5437 if (netif_running(bp->dev)) {
5438 bnx2_netif_stop(bp);
5439 bnx2_init_nic(bp);
5440 bnx2_netif_start(bp);
5441 }
5442
5443 return 0;
5444}
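/*
 * Sketch only -- not used by the driver.  Every assignment in
 * bnx2_set_coalesce() above follows one pattern: take the user-supplied
 * value and clamp it to the width of the corresponding hardware field
 * (10 bits for the tick values, 8 bits for the frame counts, judging by
 * the 0x3ff/0xff limits).  The same clamp expressed with min_t():
 */
static inline u16 demo_clamp_coal(u32 user_val, u32 hw_max)
{
	return (u16) min_t(u32, user_val, hw_max);
}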
5445
5446static void
5447bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5448{
Michael Chan972ec0d2006-01-23 16:12:43 -08005449 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005450
Michael Chan13daffa2006-03-20 17:49:20 -08005451 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005452 ering->rx_mini_max_pending = 0;
5453 ering->rx_jumbo_max_pending = 0;
5454
5455 ering->rx_pending = bp->rx_ring_size;
5456 ering->rx_mini_pending = 0;
5457 ering->rx_jumbo_pending = 0;
5458
5459 ering->tx_max_pending = MAX_TX_DESC_CNT;
5460 ering->tx_pending = bp->tx_ring_size;
5461}
5462
5463static int
5464bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5465{
Michael Chan972ec0d2006-01-23 16:12:43 -08005466 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005467
Michael Chan13daffa2006-03-20 17:49:20 -08005468 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005469 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5470 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5471
5472 return -EINVAL;
5473 }
Michael Chan13daffa2006-03-20 17:49:20 -08005474 if (netif_running(bp->dev)) {
5475 bnx2_netif_stop(bp);
5476 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5477 bnx2_free_skbs(bp);
5478 bnx2_free_mem(bp);
5479 }
5480
5481 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005482 bp->tx_ring_size = ering->tx_pending;
5483
5484 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005485 int rc;
5486
5487 rc = bnx2_alloc_mem(bp);
5488 if (rc)
5489 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005490 bnx2_init_nic(bp);
5491 bnx2_netif_start(bp);
5492 }
5493
5494 return 0;
5495}
5496
5497static void
5498bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5499{
Michael Chan972ec0d2006-01-23 16:12:43 -08005500 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005501
5502 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5503 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5504 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5505}
5506
5507static int
5508bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5509{
Michael Chan972ec0d2006-01-23 16:12:43 -08005510 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005511
5512 bp->req_flow_ctrl = 0;
5513 if (epause->rx_pause)
5514 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5515 if (epause->tx_pause)
5516 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5517
5518 if (epause->autoneg) {
5519 bp->autoneg |= AUTONEG_FLOW_CTRL;
5520 }
5521 else {
5522 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5523 }
5524
Michael Chanc770a652005-08-25 15:38:39 -07005525 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005526
5527 bnx2_setup_phy(bp);
5528
Michael Chanc770a652005-08-25 15:38:39 -07005529 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005530
5531 return 0;
5532}
5533
5534static u32
5535bnx2_get_rx_csum(struct net_device *dev)
5536{
Michael Chan972ec0d2006-01-23 16:12:43 -08005537 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005538
5539 return bp->rx_csum;
5540}
5541
5542static int
5543bnx2_set_rx_csum(struct net_device *dev, u32 data)
5544{
Michael Chan972ec0d2006-01-23 16:12:43 -08005545 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005546
5547 bp->rx_csum = data;
5548 return 0;
5549}
5550
Michael Chanb11d6212006-06-29 12:31:21 -07005551static int
5552bnx2_set_tso(struct net_device *dev, u32 data)
5553{
Michael Chan4666f872007-05-03 13:22:28 -07005554 struct bnx2 *bp = netdev_priv(dev);
5555
5556 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005557 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005558 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5559 dev->features |= NETIF_F_TSO6;
5560 } else
5561 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5562 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005563 return 0;
5564}
5565
Michael Chancea94db2006-06-12 22:16:13 -07005566#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005567
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005568static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005569 char string[ETH_GSTRING_LEN];
5570} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5571 { "rx_bytes" },
5572 { "rx_error_bytes" },
5573 { "tx_bytes" },
5574 { "tx_error_bytes" },
5575 { "rx_ucast_packets" },
5576 { "rx_mcast_packets" },
5577 { "rx_bcast_packets" },
5578 { "tx_ucast_packets" },
5579 { "tx_mcast_packets" },
5580 { "tx_bcast_packets" },
5581 { "tx_mac_errors" },
5582 { "tx_carrier_errors" },
5583 { "rx_crc_errors" },
5584 { "rx_align_errors" },
5585 { "tx_single_collisions" },
5586 { "tx_multi_collisions" },
5587 { "tx_deferred" },
5588 { "tx_excess_collisions" },
5589 { "tx_late_collisions" },
5590 { "tx_total_collisions" },
5591 { "rx_fragments" },
5592 { "rx_jabbers" },
5593 { "rx_undersize_packets" },
5594 { "rx_oversize_packets" },
5595 { "rx_64_byte_packets" },
5596 { "rx_65_to_127_byte_packets" },
5597 { "rx_128_to_255_byte_packets" },
5598 { "rx_256_to_511_byte_packets" },
5599 { "rx_512_to_1023_byte_packets" },
5600 { "rx_1024_to_1522_byte_packets" },
5601 { "rx_1523_to_9022_byte_packets" },
5602 { "tx_64_byte_packets" },
5603 { "tx_65_to_127_byte_packets" },
5604 { "tx_128_to_255_byte_packets" },
5605 { "tx_256_to_511_byte_packets" },
5606 { "tx_512_to_1023_byte_packets" },
5607 { "tx_1024_to_1522_byte_packets" },
5608 { "tx_1523_to_9022_byte_packets" },
5609 { "rx_xon_frames" },
5610 { "rx_xoff_frames" },
5611 { "tx_xon_frames" },
5612 { "tx_xoff_frames" },
5613 { "rx_mac_ctrl_frames" },
5614 { "rx_filtered_packets" },
5615 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005616 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005617};
5618
5619#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5620
Arjan van de Venf71e1302006-03-03 21:33:57 -05005621static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005622 STATS_OFFSET32(stat_IfHCInOctets_hi),
5623 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5624 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5625 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5626 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5627 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5628 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5629 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5630 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5631 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5632 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005633 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5634 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5635 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5636 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5637 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5638 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5639 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5640 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5641 STATS_OFFSET32(stat_EtherStatsCollisions),
5642 STATS_OFFSET32(stat_EtherStatsFragments),
5643 STATS_OFFSET32(stat_EtherStatsJabbers),
5644 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5645 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5646 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5647 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5648 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5649 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5650 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5651 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5652 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5653 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5654 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5655 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5656 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5657 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5658 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5659 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5660 STATS_OFFSET32(stat_XonPauseFramesReceived),
5661 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5662 STATS_OFFSET32(stat_OutXonSent),
5663 STATS_OFFSET32(stat_OutXoffSent),
5664 STATS_OFFSET32(stat_MacControlFramesReceived),
5665 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5666 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005667 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005668};
5669
5670/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5671 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005672 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005673static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005674 8,0,8,8,8,8,8,8,8,8,
5675 4,0,4,4,4,4,4,4,4,4,
5676 4,4,4,4,4,4,4,4,4,4,
5677 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005678 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005679};
5680
Michael Chan5b0c76a2005-11-04 08:45:49 -08005681static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5682 8,0,8,8,8,8,8,8,8,8,
5683 4,4,4,4,4,4,4,4,4,4,
5684 4,4,4,4,4,4,4,4,4,4,
5685 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005686 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005687};
5688
Michael Chanb6016b72005-05-26 13:03:09 -07005689#define BNX2_NUM_TESTS 6
5690
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005691static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005692 char string[ETH_GSTRING_LEN];
5693} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5694 { "register_test (offline)" },
5695 { "memory_test (offline)" },
5696 { "loopback_test (offline)" },
5697 { "nvram_test (online)" },
5698 { "interrupt_test (online)" },
5699 { "link_test (online)" },
5700};
5701
5702static int
5703bnx2_self_test_count(struct net_device *dev)
5704{
5705 return BNX2_NUM_TESTS;
5706}
5707
5708static void
5709bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5710{
Michael Chan972ec0d2006-01-23 16:12:43 -08005711 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005712
5713 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5714 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005715 int i;
5716
Michael Chanb6016b72005-05-26 13:03:09 -07005717 bnx2_netif_stop(bp);
5718 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5719 bnx2_free_skbs(bp);
5720
5721 if (bnx2_test_registers(bp) != 0) {
5722 buf[0] = 1;
5723 etest->flags |= ETH_TEST_FL_FAILED;
5724 }
5725 if (bnx2_test_memory(bp) != 0) {
5726 buf[1] = 1;
5727 etest->flags |= ETH_TEST_FL_FAILED;
5728 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005729 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005730 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005731
5732 if (!netif_running(bp->dev)) {
5733 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5734 }
5735 else {
5736 bnx2_init_nic(bp);
5737 bnx2_netif_start(bp);
5738 }
5739
5740 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005741 for (i = 0; i < 7; i++) {
5742 if (bp->link_up)
5743 break;
5744 msleep_interruptible(1000);
5745 }
Michael Chanb6016b72005-05-26 13:03:09 -07005746 }
5747
5748 if (bnx2_test_nvram(bp) != 0) {
5749 buf[3] = 1;
5750 etest->flags |= ETH_TEST_FL_FAILED;
5751 }
5752 if (bnx2_test_intr(bp) != 0) {
5753 buf[4] = 1;
5754 etest->flags |= ETH_TEST_FL_FAILED;
5755 }
5756
5757 if (bnx2_test_link(bp) != 0) {
5758 buf[5] = 1;
5759 etest->flags |= ETH_TEST_FL_FAILED;
5760
5761 }
5762}
5763
5764static void
5765bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5766{
5767 switch (stringset) {
5768 case ETH_SS_STATS:
5769 memcpy(buf, bnx2_stats_str_arr,
5770 sizeof(bnx2_stats_str_arr));
5771 break;
5772 case ETH_SS_TEST:
5773 memcpy(buf, bnx2_tests_str_arr,
5774 sizeof(bnx2_tests_str_arr));
5775 break;
5776 }
5777}
5778
5779static int
5780bnx2_get_stats_count(struct net_device *dev)
5781{
5782 return BNX2_NUM_STATS;
5783}
5784
5785static void
5786bnx2_get_ethtool_stats(struct net_device *dev,
5787 struct ethtool_stats *stats, u64 *buf)
5788{
Michael Chan972ec0d2006-01-23 16:12:43 -08005789 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005790 int i;
5791 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005792 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005793
5794 if (hw_stats == NULL) {
5795 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5796 return;
5797 }
5798
Michael Chan5b0c76a2005-11-04 08:45:49 -08005799 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5800 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5801 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5802 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005803 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005804 else
5805 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005806
5807 for (i = 0; i < BNX2_NUM_STATS; i++) {
5808 if (stats_len_arr[i] == 0) {
5809 /* skip this counter */
5810 buf[i] = 0;
5811 continue;
5812 }
5813 if (stats_len_arr[i] == 4) {
5814 /* 4-byte counter */
5815 buf[i] = (u64)
5816 *(hw_stats + bnx2_stats_offset_arr[i]);
5817 continue;
5818 }
5819 /* 8-byte counter */
5820 buf[i] = (((u64) *(hw_stats +
5821 bnx2_stats_offset_arr[i])) << 32) +
5822 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5823 }
5824}
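/*
 * Sketch only -- not used by the driver.  bnx2_get_ethtool_stats() above is
 * driven by two tables: bnx2_stats_offset_arr[] gives the u32 offset of
 * each counter in the statistics block, and the per-chip *_stats_len_arr[]
 * says whether that counter is 32-bit, 64-bit (stored high word first), or
 * skipped entirely (length 0, for counters with known errata).  One counter
 * read, open-coded as a hypothetical helper:
 */
static u64 demo_read_hw_stat(const u32 *hw_stats, unsigned long off, u8 len)
{
	if (len == 0)
		return 0;			/* counter skipped (errata) */
	if (len == 4)
		return hw_stats[off];		/* plain 32-bit counter */
	return ((u64) hw_stats[off] << 32) + hw_stats[off + 1];
}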
5825
5826static int
5827bnx2_phys_id(struct net_device *dev, u32 data)
5828{
Michael Chan972ec0d2006-01-23 16:12:43 -08005829 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005830 int i;
5831 u32 save;
5832
5833 if (data == 0)
5834 data = 2;
5835
5836 save = REG_RD(bp, BNX2_MISC_CFG);
5837 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5838
5839 for (i = 0; i < (data * 2); i++) {
5840 if ((i % 2) == 0) {
5841 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5842 }
5843 else {
5844 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5845 BNX2_EMAC_LED_1000MB_OVERRIDE |
5846 BNX2_EMAC_LED_100MB_OVERRIDE |
5847 BNX2_EMAC_LED_10MB_OVERRIDE |
5848 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5849 BNX2_EMAC_LED_TRAFFIC);
5850 }
5851 msleep_interruptible(500);
5852 if (signal_pending(current))
5853 break;
5854 }
5855 REG_WR(bp, BNX2_EMAC_LED, 0);
5856 REG_WR(bp, BNX2_MISC_CFG, save);
5857 return 0;
5858}
5859
Michael Chan4666f872007-05-03 13:22:28 -07005860static int
5861bnx2_set_tx_csum(struct net_device *dev, u32 data)
5862{
5863 struct bnx2 *bp = netdev_priv(dev);
5864
5865 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5866 return (ethtool_op_set_tx_hw_csum(dev, data));
5867 else
5868 return (ethtool_op_set_tx_csum(dev, data));
5869}
5870
Jeff Garzik7282d492006-09-13 14:30:00 -04005871static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005872 .get_settings = bnx2_get_settings,
5873 .set_settings = bnx2_set_settings,
5874 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005875 .get_regs_len = bnx2_get_regs_len,
5876 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005877 .get_wol = bnx2_get_wol,
5878 .set_wol = bnx2_set_wol,
5879 .nway_reset = bnx2_nway_reset,
5880 .get_link = ethtool_op_get_link,
5881 .get_eeprom_len = bnx2_get_eeprom_len,
5882 .get_eeprom = bnx2_get_eeprom,
5883 .set_eeprom = bnx2_set_eeprom,
5884 .get_coalesce = bnx2_get_coalesce,
5885 .set_coalesce = bnx2_set_coalesce,
5886 .get_ringparam = bnx2_get_ringparam,
5887 .set_ringparam = bnx2_set_ringparam,
5888 .get_pauseparam = bnx2_get_pauseparam,
5889 .set_pauseparam = bnx2_set_pauseparam,
5890 .get_rx_csum = bnx2_get_rx_csum,
5891 .set_rx_csum = bnx2_set_rx_csum,
5892 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005893 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005894 .get_sg = ethtool_op_get_sg,
5895 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005896 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005897 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005898 .self_test_count = bnx2_self_test_count,
5899 .self_test = bnx2_self_test,
5900 .get_strings = bnx2_get_strings,
5901 .phys_id = bnx2_phys_id,
5902 .get_stats_count = bnx2_get_stats_count,
5903 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005904 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005905};
5906
5907/* Called with rtnl_lock */
5908static int
5909bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5910{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005911 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005912 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005913 int err;
5914
5915 switch(cmd) {
5916 case SIOCGMIIPHY:
5917 data->phy_id = bp->phy_addr;
5918
5919 /* fallthru */
5920 case SIOCGMIIREG: {
5921 u32 mii_regval;
5922
Michael Chandad3e452007-05-03 13:18:03 -07005923 if (!netif_running(dev))
5924 return -EAGAIN;
5925
Michael Chanc770a652005-08-25 15:38:39 -07005926 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005927 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005928 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005929
5930 data->val_out = mii_regval;
5931
5932 return err;
5933 }
5934
5935 case SIOCSMIIREG:
5936 if (!capable(CAP_NET_ADMIN))
5937 return -EPERM;
5938
Michael Chandad3e452007-05-03 13:18:03 -07005939 if (!netif_running(dev))
5940 return -EAGAIN;
5941
Michael Chanc770a652005-08-25 15:38:39 -07005942 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005943 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005944 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005945
5946 return err;
5947
5948 default:
5949 /* do nothing */
5950 break;
5951 }
5952 return -EOPNOTSUPP;
5953}
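/*
 * Userspace sketch (not part of this file): the SIOCGMIIPHY/SIOCGMIIREG
 * cases above are what tools like mii-tool exercise.  One common pattern
 * is to overlay struct mii_ioctl_data on the ifreq union, mirroring the
 * kernel's if_mii(); "eth0" and the register choice are placeholders.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *) &ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
 *			perror("SIOCGMIIPHY");
 *			return 1;
 *		}
 *		mii->reg_num = MII_BMSR;
 *		if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
 *			perror("SIOCGMIIREG");
 *			return 1;
 *		}
 *		printf("PHY %u BMSR 0x%04x\n", mii->phy_id, mii->val_out);
 *		return 0;
 *	}
 */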
5954
5955/* Called with rtnl_lock */
5956static int
5957bnx2_change_mac_addr(struct net_device *dev, void *p)
5958{
5959 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005960 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005961
Michael Chan73eef4c2005-08-25 15:39:15 -07005962 if (!is_valid_ether_addr(addr->sa_data))
5963 return -EINVAL;
5964
Michael Chanb6016b72005-05-26 13:03:09 -07005965 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5966 if (netif_running(dev))
5967 bnx2_set_mac_addr(bp);
5968
5969 return 0;
5970}
5971
5972/* Called with rtnl_lock */
5973static int
5974bnx2_change_mtu(struct net_device *dev, int new_mtu)
5975{
Michael Chan972ec0d2006-01-23 16:12:43 -08005976 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005977
5978 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5979 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5980 return -EINVAL;
5981
5982 dev->mtu = new_mtu;
5983 if (netif_running(dev)) {
5984 bnx2_netif_stop(bp);
5985
5986 bnx2_init_nic(bp);
5987
5988 bnx2_netif_start(bp);
5989 }
5990 return 0;
5991}
5992
5993#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5994static void
5995poll_bnx2(struct net_device *dev)
5996{
Michael Chan972ec0d2006-01-23 16:12:43 -08005997 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005998
5999 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006000 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006001 enable_irq(bp->pdev->irq);
6002}
6003#endif
6004
Michael Chan253c8b72007-01-08 19:56:01 -08006005static void __devinit
6006bnx2_get_5709_media(struct bnx2 *bp)
6007{
6008 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6009 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6010 u32 strap;
6011
6012 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6013 return;
6014 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6015 bp->phy_flags |= PHY_SERDES_FLAG;
6016 return;
6017 }
6018
6019 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6020 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6021 else
6022 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6023
6024 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6025 switch (strap) {
6026 case 0x4:
6027 case 0x5:
6028 case 0x6:
6029 bp->phy_flags |= PHY_SERDES_FLAG;
6030 return;
6031 }
6032 } else {
6033 switch (strap) {
6034 case 0x1:
6035 case 0x2:
6036 case 0x4:
6037 bp->phy_flags |= PHY_SERDES_FLAG;
6038 return;
6039 }
6040 }
6041}
6042
Michael Chanb6016b72005-05-26 13:03:09 -07006043static int __devinit
6044bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6045{
6046 struct bnx2 *bp;
6047 unsigned long mem_len;
6048 int rc;
6049 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006050 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006051
6052 SET_MODULE_OWNER(dev);
6053 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006054 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006055
6056 bp->flags = 0;
6057 bp->phy_flags = 0;
6058
6059 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6060 rc = pci_enable_device(pdev);
6061 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006062		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006063 goto err_out;
6064 }
6065
6066 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006067 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006068 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006069 rc = -ENODEV;
6070 goto err_out_disable;
6071 }
6072
6073 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6074 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006075 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006076 goto err_out_disable;
6077 }
6078
6079 pci_set_master(pdev);
6080
6081 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6082 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006083 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006084 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006085 rc = -EIO;
6086 goto err_out_release;
6087 }
6088
Michael Chanb6016b72005-05-26 13:03:09 -07006089 bp->dev = dev;
6090 bp->pdev = pdev;
6091
6092 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006093 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006094 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006095
6096 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006097 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006098 dev->mem_end = dev->mem_start + mem_len;
6099 dev->irq = pdev->irq;
6100
6101 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6102
6103 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006104 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006105 rc = -ENOMEM;
6106 goto err_out_release;
6107 }
6108
6109 /* Configure byte swap and enable write to the reg_window registers.
6110 * Rely on CPU to do target byte swapping on big endian systems
6111 * The chip's target access swapping will not swap all accesses
6112 */
6113 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6114 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6115 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6116
Pavel Machek829ca9a2005-09-03 15:56:56 -07006117 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006118
6119 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6120
Michael Chan59b47d82006-11-19 14:10:45 -08006121 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6122 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6123 if (bp->pcix_cap == 0) {
6124 dev_err(&pdev->dev,
6125 "Cannot find PCIX capability, aborting.\n");
6126 rc = -EIO;
6127 goto err_out_unmap;
6128 }
6129 }
6130
Michael Chan8e6a72c2007-05-03 13:24:48 -07006131 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6132 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6133 bp->flags |= MSI_CAP_FLAG;
6134 }
6135
Michael Chan40453c82007-05-03 13:19:18 -07006136 /* 5708 cannot support DMA addresses > 40-bit. */
6137 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6138 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6139 else
6140 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6141
6142 /* Configure DMA attributes. */
6143 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6144 dev->features |= NETIF_F_HIGHDMA;
6145 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6146 if (rc) {
6147 dev_err(&pdev->dev,
6148 "pci_set_consistent_dma_mask failed, aborting.\n");
6149 goto err_out_unmap;
6150 }
6151 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6152 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6153 goto err_out_unmap;
6154 }
6155
Michael Chanb6016b72005-05-26 13:03:09 -07006156 /* Get bus information. */
6157 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6158 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6159 u32 clkreg;
6160
6161 bp->flags |= PCIX_FLAG;
6162
6163 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006164
Michael Chanb6016b72005-05-26 13:03:09 -07006165 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6166 switch (clkreg) {
6167 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6168 bp->bus_speed_mhz = 133;
6169 break;
6170
6171 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6172 bp->bus_speed_mhz = 100;
6173 break;
6174
6175 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6176 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6177 bp->bus_speed_mhz = 66;
6178 break;
6179
6180 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6181 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6182 bp->bus_speed_mhz = 50;
6183 break;
6184
6185 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6186 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6187 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6188 bp->bus_speed_mhz = 33;
6189 break;
6190 }
6191 }
6192 else {
6193 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6194 bp->bus_speed_mhz = 66;
6195 else
6196 bp->bus_speed_mhz = 33;
6197 }
6198
6199 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6200 bp->flags |= PCI_32BIT_FLAG;
6201
6202 /* 5706A0 may falsely detect SERR and PERR. */
6203 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6204 reg = REG_RD(bp, PCI_COMMAND);
6205 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6206 REG_WR(bp, PCI_COMMAND, reg);
6207 }
6208 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6209 !(bp->flags & PCIX_FLAG)) {
6210
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006211 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006212 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006213		goto err_out_unmap;
6214 }
6215
6216 bnx2_init_nvram(bp);
6217
Michael Chane3648b32005-11-04 08:51:21 -08006218 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6219
6220 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006221 BNX2_SHM_HDR_SIGNATURE_SIG) {
6222 u32 off = PCI_FUNC(pdev->devfn) << 2;
6223
6224 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6225 } else
Michael Chane3648b32005-11-04 08:51:21 -08006226 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6227
Michael Chanb6016b72005-05-26 13:03:09 -07006228 /* Get the permanent MAC address. First we need to make sure the
6229 * firmware is actually running.
6230 */
Michael Chane3648b32005-11-04 08:51:21 -08006231 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006232
6233 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6234 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006235 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006236 rc = -ENODEV;
6237 goto err_out_unmap;
6238 }
6239
Michael Chane3648b32005-11-04 08:51:21 -08006240 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006241
Michael Chane3648b32005-11-04 08:51:21 -08006242 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006243 bp->mac_addr[0] = (u8) (reg >> 8);
6244 bp->mac_addr[1] = (u8) reg;
6245
Michael Chane3648b32005-11-04 08:51:21 -08006246 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006247 bp->mac_addr[2] = (u8) (reg >> 24);
6248 bp->mac_addr[3] = (u8) (reg >> 16);
6249 bp->mac_addr[4] = (u8) (reg >> 8);
6250 bp->mac_addr[5] = (u8) reg;
6251
6252 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006253 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006254
6255 bp->rx_csum = 1;
6256
6257 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6258
6259 bp->tx_quick_cons_trip_int = 20;
6260 bp->tx_quick_cons_trip = 20;
6261 bp->tx_ticks_int = 80;
6262 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006263
Michael Chanb6016b72005-05-26 13:03:09 -07006264 bp->rx_quick_cons_trip_int = 6;
6265 bp->rx_quick_cons_trip = 6;
6266 bp->rx_ticks_int = 18;
6267 bp->rx_ticks = 18;
6268
6269 bp->stats_ticks = 1000000 & 0xffff00;
6270
6271 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006272 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006273
Michael Chan5b0c76a2005-11-04 08:45:49 -08006274 bp->phy_addr = 1;
6275
Michael Chanb6016b72005-05-26 13:03:09 -07006276 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006277 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6278 bnx2_get_5709_media(bp);
6279 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006280 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006281
6282 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006283 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006284 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006285 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006286 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006287 BNX2_SHARED_HW_CFG_CONFIG);
6288 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6289 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6290 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006291 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6292 CHIP_NUM(bp) == CHIP_NUM_5708)
6293 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006294 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6295 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006296
Michael Chan16088272006-06-12 22:16:43 -07006297 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6298 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6299 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006300 bp->flags |= NO_WOL_FLAG;
6301
Michael Chanb6016b72005-05-26 13:03:09 -07006302 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6303 bp->tx_quick_cons_trip_int =
6304 bp->tx_quick_cons_trip;
6305 bp->tx_ticks_int = bp->tx_ticks;
6306 bp->rx_quick_cons_trip_int =
6307 bp->rx_quick_cons_trip;
6308 bp->rx_ticks_int = bp->rx_ticks;
6309 bp->comp_prod_trip_int = bp->comp_prod_trip;
6310 bp->com_ticks_int = bp->com_ticks;
6311 bp->cmd_ticks_int = bp->cmd_ticks;
6312 }
6313
Michael Chanf9317a42006-09-29 17:06:23 -07006314 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6315 *
6316 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6317 * with byte enables disabled on the unused 32-bit word. This is legal
6318 * but causes problems on the AMD 8132 which will eventually stop
6319 * responding after a while.
6320 *
6321 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006322 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006323 */
6324 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6325 struct pci_dev *amd_8132 = NULL;
6326
6327 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6328 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6329 amd_8132))) {
6330 u8 rev;
6331
6332 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6333 if (rev >= 0x10 && rev <= 0x13) {
6334 disable_msi = 1;
6335 pci_dev_put(amd_8132);
6336 break;
6337 }
6338 }
6339 }
6340
Michael Chanb6016b72005-05-26 13:03:09 -07006341 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6342 bp->req_line_speed = 0;
6343 if (bp->phy_flags & PHY_SERDES_FLAG) {
6344 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006345
Michael Chane3648b32005-11-04 08:51:21 -08006346 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006347 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6348 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6349 bp->autoneg = 0;
6350 bp->req_line_speed = bp->line_speed = SPEED_1000;
6351 bp->req_duplex = DUPLEX_FULL;
6352 }
Michael Chanb6016b72005-05-26 13:03:09 -07006353 }
6354 else {
6355 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6356 }
6357
6358 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6359
Michael Chancd339a02005-08-25 15:35:24 -07006360 init_timer(&bp->timer);
6361 bp->timer.expires = RUN_AT(bp->timer_interval);
6362 bp->timer.data = (unsigned long) bp;
6363 bp->timer.function = bnx2_timer;
6364
Michael Chanb6016b72005-05-26 13:03:09 -07006365 return 0;
6366
6367err_out_unmap:
6368 if (bp->regview) {
6369 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006370 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006371 }
6372
6373err_out_release:
6374 pci_release_regions(pdev);
6375
6376err_out_disable:
6377 pci_disable_device(pdev);
6378 pci_set_drvdata(pdev, NULL);
6379
6380err_out:
6381 return rc;
6382}
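/*
 * Sketch only -- not wired into the driver.  The DMA setup in
 * bnx2_init_board() above follows the usual negotiation pattern of this
 * kernel generation: try the widest mask the chip supports (40-bit on the
 * 5708, 64-bit otherwise) and fall back to 32-bit DMA if the platform
 * refuses.  Stripped of the error reporting and the NETIF_F_HIGHDMA
 * bookkeeping, with a hypothetical helper name:
 */
static int demo_set_dma_masks(struct pci_dev *pdev, u64 wide_mask)
{
	if (pci_set_dma_mask(pdev, wide_mask) == 0)
		return pci_set_consistent_dma_mask(pdev, wide_mask);

	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}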
6383
6384static int __devinit
6385bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6386{
6387 static int version_printed = 0;
6388 struct net_device *dev = NULL;
6389 struct bnx2 *bp;
6390 int rc, i;
6391
6392 if (version_printed++ == 0)
6393 printk(KERN_INFO "%s", version);
6394
6395 /* dev zeroed in init_etherdev */
6396 dev = alloc_etherdev(sizeof(*bp));
6397
6398 if (!dev)
6399 return -ENOMEM;
6400
6401 rc = bnx2_init_board(pdev, dev);
6402 if (rc < 0) {
6403 free_netdev(dev);
6404 return rc;
6405 }
6406
6407 dev->open = bnx2_open;
6408 dev->hard_start_xmit = bnx2_start_xmit;
6409 dev->stop = bnx2_close;
6410 dev->get_stats = bnx2_get_stats;
6411 dev->set_multicast_list = bnx2_set_rx_mode;
6412 dev->do_ioctl = bnx2_ioctl;
6413 dev->set_mac_address = bnx2_change_mac_addr;
6414 dev->change_mtu = bnx2_change_mtu;
6415 dev->tx_timeout = bnx2_tx_timeout;
6416 dev->watchdog_timeo = TX_TIMEOUT;
6417#ifdef BCM_VLAN
6418 dev->vlan_rx_register = bnx2_vlan_rx_register;
6419 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6420#endif
6421 dev->poll = bnx2_poll;
6422 dev->ethtool_ops = &bnx2_ethtool_ops;
6423 dev->weight = 64;
6424
Michael Chan972ec0d2006-01-23 16:12:43 -08006425 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006426
6427#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6428 dev->poll_controller = poll_bnx2;
6429#endif
6430
Michael Chan1b2f9222007-05-03 13:20:19 -07006431 pci_set_drvdata(pdev, dev);
6432
6433 memcpy(dev->dev_addr, bp->mac_addr, 6);
6434 memcpy(dev->perm_addr, bp->mac_addr, 6);
6435 bp->name = board_info[ent->driver_data].name;
6436
Michael Chan4666f872007-05-03 13:22:28 -07006437 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6438 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6439 else
6440 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan1b2f9222007-05-03 13:20:19 -07006441#ifdef BCM_VLAN
6442 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6443#endif
6444 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006445 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6446 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006447
Michael Chanb6016b72005-05-26 13:03:09 -07006448 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006449 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006450 if (bp->regview)
6451 iounmap(bp->regview);
6452 pci_release_regions(pdev);
6453 pci_disable_device(pdev);
6454 pci_set_drvdata(pdev, NULL);
6455 free_netdev(dev);
6456 return rc;
6457 }
6458
Michael Chanb6016b72005-05-26 13:03:09 -07006459 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6460 "IRQ %d, ",
6461 dev->name,
6462 bp->name,
6463 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6464 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6465 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6466 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6467 bp->bus_speed_mhz,
6468 dev->base_addr,
6469 bp->pdev->irq);
6470
6471 printk("node addr ");
6472 for (i = 0; i < 6; i++)
6473 printk("%2.2x", dev->dev_addr[i]);
6474 printk("\n");
6475
Michael Chanb6016b72005-05-26 13:03:09 -07006476 return 0;
6477}
6478
6479static void __devexit
6480bnx2_remove_one(struct pci_dev *pdev)
6481{
6482 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006483 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006484
Michael Chanafdc08b2005-08-25 15:34:29 -07006485 flush_scheduled_work();
6486
Michael Chanb6016b72005-05-26 13:03:09 -07006487 unregister_netdev(dev);
6488
6489 if (bp->regview)
6490 iounmap(bp->regview);
6491
6492 free_netdev(dev);
6493 pci_release_regions(pdev);
6494 pci_disable_device(pdev);
6495 pci_set_drvdata(pdev, NULL);
6496}
6497
6498static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006499bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006500{
6501 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006502 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006503 u32 reset_code;
6504
6505 if (!netif_running(dev))
6506 return 0;
6507
Michael Chan1d60290f2006-03-20 17:50:08 -08006508 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006509 bnx2_netif_stop(bp);
6510 netif_device_detach(dev);
6511 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006512 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006513 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006514 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006515 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6516 else
6517 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6518 bnx2_reset_chip(bp, reset_code);
6519 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006520 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006521 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006522 return 0;
6523}
6524
6525static int
6526bnx2_resume(struct pci_dev *pdev)
6527{
6528 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006529 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006530
6531 if (!netif_running(dev))
6532 return 0;
6533
Michael Chan30c517b2007-05-03 13:20:40 -07006534 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006535 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006536 netif_device_attach(dev);
6537 bnx2_init_nic(bp);
6538 bnx2_netif_start(bp);
6539 return 0;
6540}
6541
6542static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006543 .name = DRV_MODULE_NAME,
6544 .id_table = bnx2_pci_tbl,
6545 .probe = bnx2_init_one,
6546 .remove = __devexit_p(bnx2_remove_one),
6547 .suspend = bnx2_suspend,
6548 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006549};
6550
6551static int __init bnx2_init(void)
6552{
Jeff Garzik29917622006-08-19 17:48:59 -04006553 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006554}
6555
6556static void __exit bnx2_cleanup(void)
6557{
6558 pci_unregister_driver(&bnx2_pci_driver);
6559}
6560
6561module_init(bnx2_init);
6562module_exit(bnx2_cleanup);
6563
6564
6565