Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chan68c9f752007-04-24 15:35:53 -070057#define DRV_MODULE_VERSION "1.5.8"
58#define DRV_MODULE_RELDATE "April 24, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080084 BCM5708,
85 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080086 BCM5709,
Michael Chan27a005b2007-05-03 13:23:41 -070087 BCM5709S,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
90/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050091static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070092 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080099 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chan27a005b2007-05-03 13:23:41 -0700102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
Michael Chanb6016b72005-05-26 13:03:09 -0700103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chan27a005b2007-05-03 13:23:41 -0700122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
Michael Chanb6016b72005-05-26 13:03:09 -0700124 { 0, }
125};
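/* The last field of each pci_device_id entry above is driver_data; it
 * carries the board_t index defined earlier, so the probe path can
 * recover the printable adapter name with something like
 * board_info[ent->driver_data].name.  (Illustrative usage only -- the
 * actual lookup is done in the probe routine later in the file.)
 */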
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
 202 /* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
Michael Chane89bbf12005-08-25 15:36:58 -0700216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
Michael Chan2f8af122006-08-15 01:39:10 -0700218 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700219
Michael Chan2f8af122006-08-15 01:39:10 -0700220 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800221
 222 /* The ring uses 256 indices for 255 entries; one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
Michael Chane89bbf12005-08-25 15:36:58 -0700231 return (bp->tx_ring_size - diff);
232}
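/* Worked example for the arithmetic above (index values are made up):
 * with tx_cons = 0xfff0 and tx_prod wrapped around the 16-bit index
 * space to 0x0010, the unsigned subtraction produces a value well
 * above TX_DESC_CNT, and masking with 0xffff recovers the true
 * distance of 0x20 (32) descriptors in flight, leaving
 * tx_ring_size - 32 free slots.  A raw distance of exactly
 * TX_DESC_CNT (256) can only mean 255 real entries, because one index
 * in every 256 is skipped (it addresses the ring's next-page pointer
 * descriptor), which is why it is clamped to MAX_TX_DESC_CNT.
 */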
233
Michael Chanb6016b72005-05-26 13:03:09 -0700234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
Michael Chan1b8227c2007-05-03 13:24:05 -0700237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
Michael Chan1b8227c2007-05-03 13:24:05 -0700241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
Michael Chanb6016b72005-05-26 13:03:09 -0700244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
Michael Chan1b8227c2007-05-03 13:24:05 -0700249 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
Michael Chan1b8227c2007-05-03 13:24:05 -0700252 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700253}
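/* Indirect register access pattern used by the two helpers above: the
 * target address is first written to BNX2_PCICFG_REG_WINDOW_ADDRESS
 * and the data is then read from, or written to,
 * BNX2_PCICFG_REG_WINDOW.  Because this is a two-step sequence through
 * a single shared window, indirect_lock serializes callers.  A call
 * such as
 *
 *	val = bnx2_reg_rd_ind(bp, bp->shmem_base + BNX2_FW_MB);
 *
 * (the operation that the REG_RD_IND() wrapper used later in this file
 * is expected to resolve to) therefore expands to "write address, read
 * window" under the lock.
 */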
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
Michael Chan1b8227c2007-05-03 13:24:05 -0700259 spin_lock_bh(&bp->indirect_lock);
Michael Chan59b47d82006-11-19 14:10:45 -0800260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
Michael Chan1b8227c2007-05-03 13:24:05 -0700277 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700278}
279
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
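/* The MDIO_COMM register packs a single clause-22 management frame:
 * the PHY address occupies bits 25:21, the register number bits 20:16,
 * and the 16-bit data the low half; START_BUSY starts the cycle and is
 * polled above (up to 50 x 10 usec) until the MAC clears it.  As an
 * illustration with assumed values, reading register 1 (BMSR) from a
 * PHY at address 1 would write
 *
 *	(1 << 21) | (1 << 16) | BNX2_EMAC_MDIO_COMM_COMMAND_READ |
 *	BNX2_EMAC_MDIO_COMM_DISEXT | BNX2_EMAC_MDIO_COMM_START_BUSY
 *
 * and then take the result from the DATA field of the same register.
 * bnx2_write_phy() below uses the same layout with the value OR-ed
 * into the low 16 bits.
 */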
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400357
Michael Chanb6016b72005-05-26 13:03:09 -0700358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
Michael Chanb6016b72005-05-26 13:03:09 -0700397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -0800398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chanb6016b72005-05-26 13:03:09 -0700402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
Michael Chanbf5295b2006-03-23 01:11:56 -0800404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -0700405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
Michael Chan13daffa2006-03-20 17:49:20 -0800441 int i;
442
Michael Chan59b47d82006-11-19 14:10:45 -0800443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
Michael Chanb6016b72005-05-26 13:03:09 -0700451 if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800452 pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800455 bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700472 }
Michael Chan13daffa2006-03-20 17:49:20 -0800473 vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400474 bp->rx_buf_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
Michael Chan0f31f992006-03-23 01:12:38 -0800480 int i, status_blk_size;
Michael Chan13daffa2006-03-20 17:49:20 -0800481
Michael Chan0f31f992006-03-23 01:12:38 -0800482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
Michael Chanb6016b72005-05-26 13:03:09 -0700484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
Michael Chanb6016b72005-05-26 13:03:09 -0700487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
Michael Chan13daffa2006-03-20 17:49:20 -0800494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
Michael Chanb6016b72005-05-26 13:03:09 -0700496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
Michael Chan13daffa2006-03-20 17:49:20 -0800499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
Michael Chanb6016b72005-05-26 13:03:09 -0700511
Michael Chan0f31f992006-03-23 01:12:38 -0800512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
Michael Chan0f31f992006-03-23 01:12:38 -0800522 memset(bp->status_blk, 0, bp->status_stats_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700523
Michael Chan0f31f992006-03-23 01:12:38 -0800524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700526
Michael Chan0f31f992006-03-23 01:12:38 -0800527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
Michael Chanb6016b72005-05-26 13:03:09 -0700528
Michael Chan59b47d82006-11-19 14:10:45 -0800529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
Michael Chanb6016b72005-05-26 13:03:09 -0700541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
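/* Layout of the combined allocation above (sizes are hypothetical,
 * purely for illustration): if sizeof(struct status_block) were 80
 * bytes on a machine with 64-byte cache lines, L1_CACHE_ALIGN() would
 * round the status block up to 128 bytes and the statistics block
 * would begin at offset 128 of the same DMA buffer, which is exactly
 * how stats_blk and stats_blk_mapping are derived from status_blk and
 * status_blk_mapping.
 */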
547
548static void
Michael Chane3648b32005-11-04 08:51:21 -0800549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
553 if (bp->link_up) {
554 u32 bmsr;
555
556 switch (bp->line_speed) {
557 case SPEED_10:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
562 break;
563 case SPEED_100:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
568 break;
569 case SPEED_1000:
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
572 else
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
574 break;
575 case SPEED_2500:
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
578 else
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
580 break;
581 }
582
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
584
585 if (bp->autoneg) {
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
587
Michael Chanca58c3a2007-05-03 13:22:52 -0700588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chane3648b32005-11-04 08:51:21 -0800590
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
594 else
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
596 }
597 }
598 else
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
600
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602}
603
604static void
Michael Chanb6016b72005-05-26 13:03:09 -0700605bnx2_report_link(struct bnx2 *bp)
606{
607 if (bp->link_up) {
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611 printk("%d Mbps ", bp->line_speed);
612
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
615 else
616 printk("half duplex");
617
618 if (bp->flow_ctrl) {
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
623 }
624 else {
625 printk(", transmit ");
626 }
627 printk("flow control ON");
628 }
629 printk("\n");
630 }
631 else {
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634 }
Michael Chane3648b32005-11-04 08:51:21 -0800635
636 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700637}
638
639static void
640bnx2_resolve_flow_ctrl(struct bnx2 *bp)
641{
642 u32 local_adv, remote_adv;
643
644 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
647
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
650 }
651 return;
652 }
653
654 if (bp->duplex != DUPLEX_FULL) {
655 return;
656 }
657
Michael Chan5b0c76a2005-11-04 08:45:49 -0800658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
660 u32 val;
661
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
667 return;
668 }
669
Michael Chanca58c3a2007-05-03 13:22:52 -0700670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700672
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
676
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
685
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
688 }
689
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
695 }
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
698 }
699 }
700 else {
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
703 }
704 }
705 }
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
709
710 bp->flow_ctrl = FLOW_CTRL_TX;
711 }
712 }
713}
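/* Resolution examples for the logic above (advertisement values are
 * illustrative): if both ends advertise PAUSE_CAP, the result is
 * symmetric flow control (FLOW_CTRL_TX | FLOW_CTRL_RX).  A local
 * PAUSE_CAP | PAUSE_ASYM advertisement against a partner that offers
 * only PAUSE_ASYM resolves to FLOW_CTRL_RX alone, while a local
 * PAUSE_ASYM-only advertisement against a partner offering
 * PAUSE_CAP | PAUSE_ASYM resolves to FLOW_CTRL_TX alone.  For SerDes
 * links the 1000X pause bits are first translated into the equivalent
 * PAUSE_CAP/PAUSE_ASYM encoding before this table is applied.
 */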
714
715static int
Michael Chan27a005b2007-05-03 13:23:41 -0700716bnx2_5709s_linkup(struct bnx2 *bp)
717{
718 u32 val, speed;
719
720 bp->link_up = 1;
721
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
729 return 0;
730 }
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732 switch (speed) {
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
735 break;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
738 break;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
742 break;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
745 break;
746 }
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
749 else
750 bp->duplex = DUPLEX_HALF;
751 return 0;
752}
753
754static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800755bnx2_5708s_linkup(struct bnx2 *bp)
756{
757 u32 val;
758
759 bp->link_up = 1;
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
764 break;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
767 break;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
770 break;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
773 break;
774 }
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
777 else
778 bp->duplex = DUPLEX_HALF;
779
780 return 0;
781}
782
783static int
784bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700785{
786 u32 bmcr, local_adv, remote_adv, common;
787
788 bp->link_up = 1;
789 bp->line_speed = SPEED_1000;
790
Michael Chanca58c3a2007-05-03 13:22:52 -0700791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
794 }
795 else {
796 bp->duplex = DUPLEX_HALF;
797 }
798
799 if (!(bmcr & BMCR_ANENABLE)) {
800 return 0;
801 }
802
Michael Chanca58c3a2007-05-03 13:22:52 -0700803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700805
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
811 }
812 else {
813 bp->duplex = DUPLEX_HALF;
814 }
815 }
816
817 return 0;
818}
819
820static int
821bnx2_copper_linkup(struct bnx2 *bp)
822{
823 u32 bmcr;
824
Michael Chanca58c3a2007-05-03 13:22:52 -0700825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
828
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
831
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
836 }
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
840 }
841 else {
Michael Chanca58c3a2007-05-03 13:22:52 -0700842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700844
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
857 }
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
861 }
862 else {
863 bp->line_speed = 0;
864 bp->link_up = 0;
865 }
866 }
867 }
868 else {
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
871 }
872 else {
873 bp->line_speed = SPEED_10;
874 }
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
877 }
878 else {
879 bp->duplex = DUPLEX_HALF;
880 }
881 }
882
883 return 0;
884}
885
886static int
887bnx2_set_mac_link(struct bnx2 *bp)
888{
889 u32 val;
890
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
895 }
896
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
899
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -0800902 BNX2_EMAC_MODE_25G_MODE);
Michael Chanb6016b72005-05-26 13:03:09 -0700903
904 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800905 switch (bp->line_speed) {
906 case SPEED_10:
Michael Chan59b47d82006-11-19 14:10:45 -0800907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800909 break;
910 }
911 /* fall through */
912 case SPEED_100:
913 val |= BNX2_EMAC_MODE_PORT_MII;
914 break;
915 case SPEED_2500:
Michael Chan59b47d82006-11-19 14:10:45 -0800916 val |= BNX2_EMAC_MODE_25G_MODE;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800917 /* fall through */
918 case SPEED_1000:
919 val |= BNX2_EMAC_MODE_PORT_GMII;
920 break;
921 }
Michael Chanb6016b72005-05-26 13:03:09 -0700922 }
923 else {
924 val |= BNX2_EMAC_MODE_PORT_GMII;
925 }
926
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
931
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
934
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
938
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
942
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
946
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
949
950 return 0;
951}
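/* Summary of the speed-to-port-mode selection above: 10 Mbps uses the
 * MII_10M port mode on chips other than the 5706 (the 5706 falls back
 * to plain MII), 100 Mbps uses MII, 1 Gbps uses GMII, and 2.5 Gbps
 * sets the 25G_MODE bit and then also selects GMII via the
 * fall-through.  Half duplex and the rx/tx PAUSE enables are applied
 * on top of the chosen port mode.
 */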
952
Michael Chan27a005b2007-05-03 13:23:41 -0700953static void
954bnx2_enable_bmsr1(struct bnx2 *bp)
955{
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
960}
961
962static void
963bnx2_disable_bmsr1(struct bnx2 *bp)
964{
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969}
970
Michael Chanb6016b72005-05-26 13:03:09 -0700971static int
Michael Chan605a9e22007-05-03 13:23:13 -0700972bnx2_test_and_enable_2g5(struct bnx2 *bp)
973{
974 u32 up1;
975 int ret = 1;
976
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978 return 0;
979
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
982
Michael Chan27a005b2007-05-03 13:23:41 -0700983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
Michael Chan605a9e22007-05-03 13:23:13 -0700986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
990 ret = 0;
991 }
992
Michael Chan27a005b2007-05-03 13:23:41 -0700993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
Michael Chan605a9e22007-05-03 13:23:13 -0700997 return ret;
998}
999
1000static int
1001bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002{
1003 u32 up1;
1004 int ret = 0;
1005
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007 return 0;
1008
Michael Chan27a005b2007-05-03 13:23:41 -07001009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
Michael Chan605a9e22007-05-03 13:23:13 -07001012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 1;
1017 }
1018
Michael Chan27a005b2007-05-03 13:23:41 -07001019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
Michael Chan605a9e22007-05-03 13:23:13 -07001023 return ret;
1024}
1025
1026static void
1027bnx2_enable_forced_2g5(struct bnx2 *bp)
1028{
1029 u32 bmcr;
1030
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032 return;
1033
Michael Chan27a005b2007-05-03 13:23:41 -07001034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035 u32 val;
1036
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051 }
1052
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1057 }
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059}
1060
1061static void
1062bnx2_disable_forced_2g5(struct bnx2 *bp)
1063{
1064 u32 bmcr;
1065
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067 return;
1068
Michael Chan27a005b2007-05-03 13:23:41 -07001069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070 u32 val;
1071
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085 }
1086
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090}
1091
1092static int
Michael Chanb6016b72005-05-26 13:03:09 -07001093bnx2_set_link(struct bnx2 *bp)
1094{
1095 u32 bmsr;
1096 u8 link_up;
1097
Michael Chan80be4432006-11-19 14:07:28 -08001098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -07001099 bp->link_up = 1;
1100 return 0;
1101 }
1102
1103 link_up = bp->link_up;
1104
Michael Chan27a005b2007-05-03 13:23:41 -07001105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001109
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1112 u32 val;
1113
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1117 else
1118 bmsr &= ~BMSR_LSTATUS;
1119 }
1120
1121 if (bmsr & BMSR_LSTATUS) {
1122 bp->link_up = 1;
1123
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001131 }
1132 else {
1133 bnx2_copper_linkup(bp);
1134 }
1135 bnx2_resolve_flow_ctrl(bp);
1136 }
1137 else {
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
Michael Chan605a9e22007-05-03 13:23:13 -07001139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001141
Michael Chanb6016b72005-05-26 13:03:09 -07001142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1143 bp->link_up = 0;
1144 }
1145
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1148 }
1149
1150 bnx2_set_mac_link(bp);
1151
1152 return 0;
1153}
1154
1155static int
1156bnx2_reset_phy(struct bnx2 *bp)
1157{
1158 int i;
1159 u32 reg;
1160
Michael Chanca58c3a2007-05-03 13:22:52 -07001161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -07001162
1163#define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165 udelay(10);
1166
Michael Chanca58c3a2007-05-03 13:22:52 -07001167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001168 if (!(reg & BMCR_RESET)) {
1169 udelay(20);
1170 break;
1171 }
1172 }
1173 if (i == PHY_RESET_MAX_WAIT) {
1174 return -EBUSY;
1175 }
1176 return 0;
1177}
1178
1179static u32
1180bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181{
1182 u32 adv = 0;
1183
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1189 }
1190 else {
1191 adv = ADVERTISE_PAUSE_CAP;
1192 }
1193 }
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1197 }
1198 else {
1199 adv = ADVERTISE_PAUSE_ASYM;
1200 }
1201 }
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208 }
1209 }
1210 return adv;
1211}
1212
1213static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp)
1215{
Michael Chan605a9e22007-05-03 13:23:13 -07001216 u32 adv, bmcr;
Michael Chanb6016b72005-05-26 13:03:09 -07001217 u32 new_adv = 0;
1218
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001221 int force_link_down = 0;
1222
Michael Chan605a9e22007-05-03 13:23:13 -07001223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1229 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001230 bnx2_read_phy(bp, bp->mii_adv, &adv);
Michael Chan80be4432006-11-19 14:07:28 -08001231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1232
Michael Chanca58c3a2007-05-03 13:22:52 -07001233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001234 new_bmcr = bmcr & ~BMCR_ANENABLE;
Michael Chan80be4432006-11-19 14:07:28 -08001235 new_bmcr |= BMCR_SPEED1000;
Michael Chan605a9e22007-05-03 13:23:13 -07001236
Michael Chan27a005b2007-05-03 13:23:41 -07001237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1243 }
1244
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1248 else
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001250 }
1251
Michael Chanb6016b72005-05-26 13:03:09 -07001252 if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001253 adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001254 new_bmcr |= BMCR_FULLDPLX;
1255 }
1256 else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001257 adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001258 new_bmcr &= ~BMCR_FULLDPLX;
1259 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001260 if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001261 /* Force a link down visible on the other side */
1262 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001263 bnx2_write_phy(bp, bp->mii_adv, adv &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
Michael Chanca58c3a2007-05-03 13:22:52 -07001266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
Michael Chanb6016b72005-05-26 13:03:09 -07001267 BMCR_ANRESTART | BMCR_ANENABLE);
1268
1269 bp->link_up = 0;
1270 netif_carrier_off(bp->dev);
Michael Chanca58c3a2007-05-03 13:22:52 -07001271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan80be4432006-11-19 14:07:28 -08001272 bnx2_report_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001273 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001276 } else {
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001279 }
1280 return 0;
1281 }
1282
Michael Chan605a9e22007-05-03 13:23:13 -07001283 bnx2_test_and_enable_2g5(bp);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001284
Michael Chanb6016b72005-05-26 13:03:09 -07001285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1287
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1289
Michael Chanca58c3a2007-05-03 13:22:52 -07001290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001292
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1296 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chan80be4432006-11-19 14:07:28 -08001298 spin_unlock_bh(&bp->phy_lock);
1299 msleep(20);
1300 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001301 }
1302
Michael Chanca58c3a2007-05-03 13:22:52 -07001303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001305 BMCR_ANENABLE);
Michael Chanf8dd0642006-11-19 14:08:29 -08001306 /* Speed up link-up time when the link partner
 1307 * does not autonegotiate, which is very common
 1308 * in blade servers. Some blade servers use
 1309 * IPMI for keyboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1313 */
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan605a9e22007-05-03 13:23:13 -07001317 } else {
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001320 }
1321
1322 return 0;
1323}
1324
1325#define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full)
1327
1328#define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1330 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1331 ADVERTISED_1000baseT_Full)
1332
1333#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1334 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001335
Michael Chanb6016b72005-05-26 13:03:09 -07001336#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337
1338static int
1339bnx2_setup_copper_phy(struct bnx2 *bp)
1340{
1341 u32 bmcr;
1342 u32 new_bmcr;
1343
Michael Chanca58c3a2007-05-03 13:22:52 -07001344 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001345
1346 if (bp->autoneg & AUTONEG_SPEED) {
1347 u32 adv_reg, adv1000_reg;
1348 u32 new_adv_reg = 0;
1349 u32 new_adv1000_reg = 0;
1350
Michael Chanca58c3a2007-05-03 13:22:52 -07001351 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001352 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1353 ADVERTISE_PAUSE_ASYM);
1354
1355 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1356 adv1000_reg &= PHY_ALL_1000_SPEED;
1357
1358 if (bp->advertising & ADVERTISED_10baseT_Half)
1359 new_adv_reg |= ADVERTISE_10HALF;
1360 if (bp->advertising & ADVERTISED_10baseT_Full)
1361 new_adv_reg |= ADVERTISE_10FULL;
1362 if (bp->advertising & ADVERTISED_100baseT_Half)
1363 new_adv_reg |= ADVERTISE_100HALF;
1364 if (bp->advertising & ADVERTISED_100baseT_Full)
1365 new_adv_reg |= ADVERTISE_100FULL;
1366 if (bp->advertising & ADVERTISED_1000baseT_Full)
1367 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001368
Michael Chanb6016b72005-05-26 13:03:09 -07001369 new_adv_reg |= ADVERTISE_CSMA;
1370
1371 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1372
1373 if ((adv1000_reg != new_adv1000_reg) ||
1374 (adv_reg != new_adv_reg) ||
1375 ((bmcr & BMCR_ANENABLE) == 0)) {
1376
Michael Chanca58c3a2007-05-03 13:22:52 -07001377 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001378 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001379 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001380 BMCR_ANENABLE);
1381 }
1382 else if (bp->link_up) {
1383 /* Flow ctrl may have changed from auto to forced */
1384 /* or vice-versa. */
1385
1386 bnx2_resolve_flow_ctrl(bp);
1387 bnx2_set_mac_link(bp);
1388 }
1389 return 0;
1390 }
1391
1392 new_bmcr = 0;
1393 if (bp->req_line_speed == SPEED_100) {
1394 new_bmcr |= BMCR_SPEED100;
1395 }
1396 if (bp->req_duplex == DUPLEX_FULL) {
1397 new_bmcr |= BMCR_FULLDPLX;
1398 }
1399 if (new_bmcr != bmcr) {
1400 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001401
Michael Chanca58c3a2007-05-03 13:22:52 -07001402 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1403 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001404
Michael Chanb6016b72005-05-26 13:03:09 -07001405 if (bmsr & BMSR_LSTATUS) {
1406 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001407 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001408 spin_unlock_bh(&bp->phy_lock);
1409 msleep(50);
1410 spin_lock_bh(&bp->phy_lock);
1411
Michael Chanca58c3a2007-05-03 13:22:52 -07001412 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1413 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001414 }
1415
Michael Chanca58c3a2007-05-03 13:22:52 -07001416 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001417
1418 /* Normally, the new speed is setup after the link has
1419 * gone down and up again. In some cases, link will not go
1420 * down so we need to set up the new speed here.
1421 */
1422 if (bmsr & BMSR_LSTATUS) {
1423 bp->line_speed = bp->req_line_speed;
1424 bp->duplex = bp->req_duplex;
1425 bnx2_resolve_flow_ctrl(bp);
1426 bnx2_set_mac_link(bp);
1427 }
Michael Chan27a005b2007-05-03 13:23:41 -07001428 } else {
1429 bnx2_resolve_flow_ctrl(bp);
1430 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001431 }
1432 return 0;
1433}
1434
1435static int
1436bnx2_setup_phy(struct bnx2 *bp)
1437{
1438 if (bp->loopback == MAC_LOOPBACK)
1439 return 0;
1440
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1443 }
1444 else {
1445 return (bnx2_setup_copper_phy(bp));
1446 }
1447}
1448
1449static int
Michael Chan27a005b2007-05-03 13:23:41 -07001450bnx2_init_5709s_phy(struct bnx2 *bp)
1451{
1452 u32 val;
1453
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1460
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1463
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1465 bnx2_reset_phy(bp);
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1468
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1473
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1478 else
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1481
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1486
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1488
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1492
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1494
1495 return 0;
1496}
1497
1498static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001499bnx2_init_5708s_phy(struct bnx2 *bp)
1500{
1501 u32 val;
1502
Michael Chan27a005b2007-05-03 13:23:41 -07001503 bnx2_reset_phy(bp);
1504
1505 bp->mii_up1 = BCM5708S_UP1;
1506
Michael Chan5b0c76a2005-11-04 08:45:49 -08001507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1510
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1514
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1518
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1523 }
1524
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1535 }
1536
Michael Chane3648b32005-11-04 08:51:21 -08001537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1539
1540 if (val) {
1541 u32 is_backplane;
1542
Michael Chane3648b32005-11-04 08:51:21 -08001543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1551 }
1552 }
1553 return 0;
1554}
1555
1556static int
1557bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001558{
Michael Chan27a005b2007-05-03 13:23:41 -07001559 bnx2_reset_phy(bp);
1560
Michael Chanb6016b72005-05-26 13:03:09 -07001561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1562
Michael Chan59b47d82006-11-19 14:10:45 -08001563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001565
1566 if (bp->dev->mtu > 1500) {
1567 u32 val;
1568
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1573
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1577 }
1578 else {
1579 u32 val;
1580
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1584
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1588 }
1589
1590 return 0;
1591}
1592
1593static int
1594bnx2_init_copper_phy(struct bnx2 *bp)
1595{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001596 u32 val;
1597
Michael Chan27a005b2007-05-03 13:23:41 -07001598 bnx2_reset_phy(bp);
1599
Michael Chanb6016b72005-05-26 13:03:09 -07001600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1609 }
1610
Michael Chanb659f442007-02-02 00:46:35 -08001611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1615 val &= ~(1 << 8);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1617 }
1618
Michael Chanb6016b72005-05-26 13:03:09 -07001619 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1624
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1627 }
1628 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1632
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1635 }
1636
Michael Chan5b0c76a2005-11-04 08:45:49 -08001637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001641 return 0;
1642}
1643
1644
1645static int
1646bnx2_init_phy(struct bnx2 *bp)
1647{
1648 u32 val;
1649 int rc = 0;
1650
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
Michael Chanca58c3a2007-05-03 13:22:52 -07001654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001656 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1659
Michael Chanb6016b72005-05-26 13:03:09 -07001660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
Michael Chanb6016b72005-05-26 13:03:09 -07001662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1666
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001674 }
1675 else {
1676 rc = bnx2_init_copper_phy(bp);
1677 }
1678
1679 bnx2_setup_phy(bp);
1680
1681 return rc;
1682}
1683
1684static int
1685bnx2_set_mac_loopback(struct bnx2 *bp)
1686{
1687 u32 mac_mode;
1688
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693 bp->link_up = 1;
1694 return 0;
1695}
1696
Michael Chanbc5a0692006-01-23 16:13:22 -08001697static int bnx2_test_link(struct bnx2 *);
1698
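/* Put the PHY into loopback at 1000 Mbps full duplex, wait for the link
 * to come up, and force the MAC into GMII mode (presumably for the
 * loopback self test).
 */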
1699static int
1700bnx2_set_phy_loopback(struct bnx2 *bp)
1701{
1702 u32 mac_mode;
1703 int rc, i;
1704
1705 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001707 BMCR_SPEED1000);
1708 spin_unlock_bh(&bp->phy_lock);
1709 if (rc)
1710 return rc;
1711
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1714 break;
Michael Chan80be4432006-11-19 14:07:28 -08001715 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001716 }
1717
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001721 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001722
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725 bp->link_up = 1;
1726 return 0;
1727}
1728
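/* Post a message to the driver mailbox and wait for the bootcode to
 * acknowledge it.  bp->fw_wr_seq supplies the sequence number that the
 * acknowledgement is matched against.  On timeout a FW_TIMEOUT code is
 * written back and -EBUSY is returned; the error printk is suppressed
 * when 'silent' is set.
 */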
Michael Chanb6016b72005-05-26 13:03:09 -07001729static int
Michael Chanb090ae22006-01-23 16:07:10 -08001730bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001731{
1732 int i;
1733 u32 val;
1734
Michael Chanb6016b72005-05-26 13:03:09 -07001735 bp->fw_wr_seq++;
1736 msg_data |= bp->fw_wr_seq;
1737
Michael Chane3648b32005-11-04 08:51:21 -08001738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001739
1740 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1742 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001743
Michael Chane3648b32005-11-04 08:51:21 -08001744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001745
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1747 break;
1748 }
Michael Chanb090ae22006-01-23 16:07:10 -08001749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1750 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001751
1752 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1754 if (!silent)
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1756 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001757
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1760
Michael Chane3648b32005-11-04 08:51:21 -08001761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001762
Michael Chanb6016b72005-05-26 13:03:09 -07001763 return -EBUSY;
1764 }
1765
Michael Chanb090ae22006-01-23 16:07:10 -08001766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1767 return -EIO;
1768
Michael Chanb6016b72005-05-26 13:03:09 -07001769 return 0;
1770}
1771
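/* Point the 5709 context engine at the host pages allocated for context
 * memory: each page address is written through the host page table
 * registers and the WRITE_REQ bit is polled until the chip accepts it.
 */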
Michael Chan59b47d82006-11-19 14:10:45 -08001772static int
1773bnx2_init_5709_context(struct bnx2 *bp)
1774{
1775 int i, ret = 0;
1776 u32 val;
1777
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < bp->ctx_pages; i++) {
1782 int j;
1783
1784 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788 (u64) bp->ctx_blk_mapping[i] >> 32);
1789 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791 for (j = 0; j < 10; j++) {
1792
1793 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1795 break;
1796 udelay(5);
1797 }
1798 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1799 ret = -EBUSY;
1800 break;
1801 }
1802 }
1803 return ret;
1804}
1805
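/* Zero out the on-chip context memory for all 96 context IDs.  On the
 * 5706 A0, some VCIDs map to different physical context IDs, so the
 * PCID is recomputed before each context is cleared.
 */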
Michael Chanb6016b72005-05-26 13:03:09 -07001806static void
1807bnx2_init_context(struct bnx2 *bp)
1808{
1809 u32 vcid;
1810
1811 vcid = 96;
1812 while (vcid) {
1813 u32 vcid_addr, pcid_addr, offset;
1814
1815 vcid--;
1816
1817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1818 u32 new_vcid;
1819
1820 vcid_addr = GET_PCID_ADDR(vcid);
1821 if (vcid & 0x8) {
1822 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1823 }
1824 else {
1825 new_vcid = vcid;
1826 }
1827 pcid_addr = GET_PCID_ADDR(new_vcid);
1828 }
1829 else {
1830 vcid_addr = GET_CID_ADDR(vcid);
1831 pcid_addr = vcid_addr;
1832 }
1833
1834 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1836
1837 /* Zero out the context. */
1838 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839 CTX_WR(bp, 0x00, offset, 0);
1840 }
1841
1842 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1844 }
1845}
1846
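/* Work around bad RX buffer memory: allocate every free mbuf cluster
 * from the RX buffer pool, remember the good ones (bit 9 clear), and
 * free only those back so the bad clusters stay out of circulation.
 */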
1847static int
1848bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849{
1850 u16 *good_mbuf;
1851 u32 good_mbuf_cnt;
1852 u32 val;
1853
1854 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855 if (good_mbuf == NULL) {
1856 printk(KERN_ERR PFX "Failed to allocate memory in "
1857 "bnx2_alloc_bad_rbuf\n");
1858 return -ENOMEM;
1859 }
1860
1861 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864 good_mbuf_cnt = 0;
1865
1866 /* Allocate a bunch of mbufs and save the good ones in an array. */
1867 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875 /* The addresses with Bit 9 set are bad memory blocks. */
1876 if (!(val & (1 << 9))) {
1877 good_mbuf[good_mbuf_cnt] = (u16) val;
1878 good_mbuf_cnt++;
1879 }
1880
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 }
1883
1884 /* Free the good ones back to the mbuf pool thus discarding
1885 * all the bad ones. */
1886 while (good_mbuf_cnt) {
1887 good_mbuf_cnt--;
1888
1889 val = good_mbuf[good_mbuf_cnt];
1890 val = (val << 9) | val | 1;
1891
1892 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893 }
1894 kfree(good_mbuf);
1895 return 0;
1896}
1897
1898static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001899bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001900{
1901 u32 val;
1902 u8 *mac_addr = bp->dev->dev_addr;
1903
1904 val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001909 (mac_addr[4] << 8) | mac_addr[5];
1910
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912}
1913
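/* Allocate and DMA-map a fresh receive skb, attach it to the rx_bd at
 * 'index', and advance rx_prod_bseq.  The data pointer is aligned to
 * BNX2_RX_ALIGN before mapping.
 */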
1914static inline int
1915bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916{
1917 struct sk_buff *skb;
1918 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001920 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001921 unsigned long align;
1922
Michael Chan932f3772006-08-15 01:39:36 -07001923 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001924 if (skb == NULL) {
1925 return -ENOMEM;
1926 }
1927
Michael Chan59b47d82006-11-19 14:10:45 -08001928 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001930
Michael Chanb6016b72005-05-26 13:03:09 -07001931 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932 PCI_DMA_FROMDEVICE);
1933
1934 rx_buf->skb = skb;
1935 pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940 bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942 return 0;
1943}
1944
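/* Check whether the given attention event has changed state in the
 * status block and acknowledge the change through PCICFG space.
 * Returns 1 if the event toggled, 0 otherwise.
 */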
Michael Chanda3e4fb2007-05-03 13:24:23 -07001945static int
1946bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1947{
1948 struct status_block *sblk = bp->status_blk;
1949 u32 new_link_state, old_link_state;
1950 int is_set = 1;
1951
1952 new_link_state = sblk->status_attn_bits & event;
1953 old_link_state = sblk->status_attn_bits_ack & event;
1954 if (new_link_state != old_link_state) {
1955 if (new_link_state)
1956 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1957 else
1958 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1959 } else
1960 is_set = 0;
1961
1962 return is_set;
1963}
1964
Michael Chanb6016b72005-05-26 13:03:09 -07001965static void
1966bnx2_phy_int(struct bnx2 *bp)
1967{
Michael Chanda3e4fb2007-05-03 13:24:23 -07001968 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1969 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001970 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07001971 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001972 }
1973}
1974
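/* Reclaim completed TX descriptors up to the hardware consumer index,
 * unmapping and freeing the associated skbs.  The TX queue is woken
 * again once enough descriptors are free.
 */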
1975static void
1976bnx2_tx_int(struct bnx2 *bp)
1977{
Michael Chanf4e418f2005-11-04 08:53:48 -08001978 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001979 u16 hw_cons, sw_cons, sw_ring_cons;
1980 int tx_free_bd = 0;
1981
Michael Chanf4e418f2005-11-04 08:53:48 -08001982 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001983 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1984 hw_cons++;
1985 }
1986 sw_cons = bp->tx_cons;
1987
1988 while (sw_cons != hw_cons) {
1989 struct sw_bd *tx_buf;
1990 struct sk_buff *skb;
1991 int i, last;
1992
1993 sw_ring_cons = TX_RING_IDX(sw_cons);
1994
1995 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1996 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01001997
Michael Chanb6016b72005-05-26 13:03:09 -07001998 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07001999 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002000 u16 last_idx, last_ring_idx;
2001
2002 last_idx = sw_cons +
2003 skb_shinfo(skb)->nr_frags + 1;
2004 last_ring_idx = sw_ring_cons +
2005 skb_shinfo(skb)->nr_frags + 1;
2006 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2007 last_idx++;
2008 }
2009 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2010 break;
2011 }
2012 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002013
Michael Chanb6016b72005-05-26 13:03:09 -07002014 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2015 skb_headlen(skb), PCI_DMA_TODEVICE);
2016
2017 tx_buf->skb = NULL;
2018 last = skb_shinfo(skb)->nr_frags;
2019
2020 for (i = 0; i < last; i++) {
2021 sw_cons = NEXT_TX_BD(sw_cons);
2022
2023 pci_unmap_page(bp->pdev,
2024 pci_unmap_addr(
2025 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2026 mapping),
2027 skb_shinfo(skb)->frags[i].size,
2028 PCI_DMA_TODEVICE);
2029 }
2030
2031 sw_cons = NEXT_TX_BD(sw_cons);
2032
2033 tx_free_bd += last + 1;
2034
Michael Chan745720e2006-06-29 12:37:41 -07002035 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002036
Michael Chanf4e418f2005-11-04 08:53:48 -08002037 hw_cons = bp->hw_tx_cons =
2038 sblk->status_tx_quick_consumer_index0;
2039
Michael Chanb6016b72005-05-26 13:03:09 -07002040 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2041 hw_cons++;
2042 }
2043 }
2044
Michael Chane89bbf12005-08-25 15:36:58 -07002045 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002046 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2047 * before checking for netif_queue_stopped(). Without the
2048 * memory barrier, there is a small possibility that bnx2_start_xmit()
2049 * will miss it and cause the queue to be stopped forever.
2050 */
2051 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002052
Michael Chan2f8af122006-08-15 01:39:10 -07002053 if (unlikely(netif_queue_stopped(bp->dev)) &&
2054 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2055 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002056 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002057 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002058 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002059 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002060 }
Michael Chanb6016b72005-05-26 13:03:09 -07002061}
2062
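/* Recycle an RX skb that could not be passed up the stack: move it from
 * the consumer slot back to the producer slot so the hardware reuses
 * the same buffer and DMA mapping.
 */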
2063static inline void
2064bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2065 u16 cons, u16 prod)
2066{
Michael Chan236b6392006-03-20 17:49:02 -08002067 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2068 struct rx_bd *cons_bd, *prod_bd;
2069
2070 cons_rx_buf = &bp->rx_buf_ring[cons];
2071 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002072
2073 pci_dma_sync_single_for_device(bp->pdev,
2074 pci_unmap_addr(cons_rx_buf, mapping),
2075 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2076
Michael Chan236b6392006-03-20 17:49:02 -08002077 bp->rx_prod_bseq += bp->rx_buf_use_size;
2078
2079 prod_rx_buf->skb = skb;
2080
2081 if (cons == prod)
2082 return;
2083
Michael Chanb6016b72005-05-26 13:03:09 -07002084 pci_unmap_addr_set(prod_rx_buf, mapping,
2085 pci_unmap_addr(cons_rx_buf, mapping));
2086
Michael Chan3fdfcc22006-03-20 17:49:49 -08002087 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2088 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002089 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2090 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002091}
2092
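/* Process received packets up to the hardware consumer index, at most
 * 'budget' packets per call.  Small packets are copied into a new skb
 * when the MTU is above 1500; otherwise the buffer is unmapped and
 * handed to the stack and a replacement is allocated.  On allocation
 * failure the old buffer is recycled and the packet dropped.
 */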
2093static int
2094bnx2_rx_int(struct bnx2 *bp, int budget)
2095{
Michael Chanf4e418f2005-11-04 08:53:48 -08002096 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002097 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2098 struct l2_fhdr *rx_hdr;
2099 int rx_pkt = 0;
2100
Michael Chanf4e418f2005-11-04 08:53:48 -08002101 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002102 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2103 hw_cons++;
2104 }
2105 sw_cons = bp->rx_cons;
2106 sw_prod = bp->rx_prod;
2107
2108 /* Memory barrier necessary as speculative reads of the rx
2109 * buffer can be ahead of the index in the status block
2110 */
2111 rmb();
2112 while (sw_cons != hw_cons) {
2113 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002114 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002115 struct sw_bd *rx_buf;
2116 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002117 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002118
2119 sw_ring_cons = RX_RING_IDX(sw_cons);
2120 sw_ring_prod = RX_RING_IDX(sw_prod);
2121
2122 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2123 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002124
2125 rx_buf->skb = NULL;
2126
2127 dma_addr = pci_unmap_addr(rx_buf, mapping);
2128
2129 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002130 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2131
2132 rx_hdr = (struct l2_fhdr *) skb->data;
2133 len = rx_hdr->l2_fhdr_pkt_len - 4;
2134
Michael Chanade2bfe2006-01-23 16:09:51 -08002135 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002136 (L2_FHDR_ERRORS_BAD_CRC |
2137 L2_FHDR_ERRORS_PHY_DECODE |
2138 L2_FHDR_ERRORS_ALIGNMENT |
2139 L2_FHDR_ERRORS_TOO_SHORT |
2140 L2_FHDR_ERRORS_GIANT_FRAME)) {
2141
2142 goto reuse_rx;
2143 }
2144
2145 /* Since we don't have a jumbo ring, copy small packets
2146 * if mtu > 1500
2147 */
2148 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2149 struct sk_buff *new_skb;
2150
Michael Chan932f3772006-08-15 01:39:36 -07002151 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002152 if (new_skb == NULL)
2153 goto reuse_rx;
2154
2155 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002156 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2157 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002158 skb_reserve(new_skb, 2);
2159 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002160
2161 bnx2_reuse_rx_skb(bp, skb,
2162 sw_ring_cons, sw_ring_prod);
2163
2164 skb = new_skb;
2165 }
2166 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002167 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002168 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2169
2170 skb_reserve(skb, bp->rx_offset);
2171 skb_put(skb, len);
2172 }
2173 else {
2174reuse_rx:
2175 bnx2_reuse_rx_skb(bp, skb,
2176 sw_ring_cons, sw_ring_prod);
2177 goto next_rx;
2178 }
2179
2180 skb->protocol = eth_type_trans(skb, bp->dev);
2181
2182 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002183 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002184
Michael Chan745720e2006-06-29 12:37:41 -07002185 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002186 goto next_rx;
2187
2188 }
2189
Michael Chanb6016b72005-05-26 13:03:09 -07002190 skb->ip_summed = CHECKSUM_NONE;
2191 if (bp->rx_csum &&
2192 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2193 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2194
Michael Chanade2bfe2006-01-23 16:09:51 -08002195 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2196 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002197 skb->ip_summed = CHECKSUM_UNNECESSARY;
2198 }
2199
2200#ifdef BCM_VLAN
2201 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2202 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2203 rx_hdr->l2_fhdr_vlan_tag);
2204 }
2205 else
2206#endif
2207 netif_receive_skb(skb);
2208
2209 bp->dev->last_rx = jiffies;
2210 rx_pkt++;
2211
2212next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002213 sw_cons = NEXT_RX_BD(sw_cons);
2214 sw_prod = NEXT_RX_BD(sw_prod);
2215
2216 if ((rx_pkt == budget))
2217 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002218
2219 /* Refresh hw_cons to see if there is new work */
2220 if (sw_cons == hw_cons) {
2221 hw_cons = bp->hw_rx_cons =
2222 sblk->status_rx_quick_consumer_index0;
2223 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2224 hw_cons++;
2225 rmb();
2226 }
Michael Chanb6016b72005-05-26 13:03:09 -07002227 }
2228 bp->rx_cons = sw_cons;
2229 bp->rx_prod = sw_prod;
2230
2231 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2232
2233 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2234
2235 mmiowb();
2236
2237 return rx_pkt;
2238
2239}
2240
2241/* MSI ISR - The only difference between this and the INTx ISR
2242 * is that the MSI interrupt is always serviced.
2243 */
2244static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002245bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002246{
2247 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002248 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002249
Michael Chanc921e4c2005-09-08 13:15:32 -07002250 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002251 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2252 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2253 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2254
2255 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002256 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2257 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002258
Michael Chan73eef4c2005-08-25 15:39:15 -07002259 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002260
Michael Chan73eef4c2005-08-25 15:39:15 -07002261 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002262}
2263
2264static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002265bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002266{
2267 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002268 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002269
2270 /* When using INTx, it is possible for the interrupt to arrive
2271 * at the CPU before the status block posted prior to the
2272 * interrupt. Reading a register will flush the status block.
2273 * When using MSI, the MSI message will always complete after
2274 * the status block write.
2275 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002276 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002277 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2278 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002279 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002280
2281 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2282 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2283 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2284
2285 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002286 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2287 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002288
Michael Chan73eef4c2005-08-25 15:39:15 -07002289 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002290
Michael Chan73eef4c2005-08-25 15:39:15 -07002291 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002292}
2293
Michael Chanda3e4fb2007-05-03 13:24:23 -07002294#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2295
Michael Chanf4e418f2005-11-04 08:53:48 -08002296static inline int
2297bnx2_has_work(struct bnx2 *bp)
2298{
2299 struct status_block *sblk = bp->status_blk;
2300
2301 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2302 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2303 return 1;
2304
Michael Chanda3e4fb2007-05-03 13:24:23 -07002305 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2306 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002307 return 1;
2308
2309 return 0;
2310}
2311
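/* NAPI poll handler: service link attention events and TX/RX
 * completions, then re-enable interrupts once no work remains.
 */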
Michael Chanb6016b72005-05-26 13:03:09 -07002312static int
2313bnx2_poll(struct net_device *dev, int *budget)
2314{
Michael Chan972ec0d2006-01-23 16:12:43 -08002315 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002316 struct status_block *sblk = bp->status_blk;
2317 u32 status_attn_bits = sblk->status_attn_bits;
2318 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002319
Michael Chanda3e4fb2007-05-03 13:24:23 -07002320 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2321 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002322
Michael Chanb6016b72005-05-26 13:03:09 -07002323 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002324
2325 /* This is needed to take care of transient status
2326 * during link changes.
2327 */
2328 REG_WR(bp, BNX2_HC_COMMAND,
2329 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2330 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002331 }
2332
Michael Chanf4e418f2005-11-04 08:53:48 -08002333 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002334 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002335
Michael Chanf4e418f2005-11-04 08:53:48 -08002336 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002337 int orig_budget = *budget;
2338 int work_done;
2339
2340 if (orig_budget > dev->quota)
2341 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002342
Michael Chanb6016b72005-05-26 13:03:09 -07002343 work_done = bnx2_rx_int(bp, orig_budget);
2344 *budget -= work_done;
2345 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002346 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002347
Michael Chanf4e418f2005-11-04 08:53:48 -08002348 bp->last_status_idx = bp->status_blk->status_idx;
2349 rmb();
2350
2351 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002352 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002353 if (likely(bp->flags & USING_MSI_FLAG)) {
2354 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2355 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2356 bp->last_status_idx);
2357 return 0;
2358 }
Michael Chanb6016b72005-05-26 13:03:09 -07002359 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002360 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2361 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2362 bp->last_status_idx);
2363
2364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2366 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002367 return 0;
2368 }
2369
2370 return 1;
2371}
2372
Herbert Xu932ff272006-06-09 12:20:56 -07002373/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002374 * from set_multicast.
2375 */
2376static void
2377bnx2_set_rx_mode(struct net_device *dev)
2378{
Michael Chan972ec0d2006-01-23 16:12:43 -08002379 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002380 u32 rx_mode, sort_mode;
2381 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002382
Michael Chanc770a652005-08-25 15:38:39 -07002383 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002384
2385 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2386 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2387 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2388#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002389 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002390 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002391#else
Michael Chane29054f2006-01-23 16:06:06 -08002392 if (!(bp->flags & ASF_ENABLE_FLAG))
2393 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002394#endif
2395 if (dev->flags & IFF_PROMISC) {
2396 /* Promiscuous mode. */
2397 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002398 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2399 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002400 }
2401 else if (dev->flags & IFF_ALLMULTI) {
2402 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2403 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2404 0xffffffff);
2405 }
2406 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2407 }
2408 else {
2409 /* Accept one or more multicast(s). */
2410 struct dev_mc_list *mclist;
2411 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2412 u32 regidx;
2413 u32 bit;
2414 u32 crc;
2415
2416 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2417
2418 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2419 i++, mclist = mclist->next) {
2420
2421 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2422 bit = crc & 0xff;
2423 regidx = (bit & 0xe0) >> 5;
2424 bit &= 0x1f;
2425 mc_filter[regidx] |= (1 << bit);
2426 }
2427
2428 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2429 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2430 mc_filter[i]);
2431 }
2432
2433 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2434 }
2435
2436 if (rx_mode != bp->rx_mode) {
2437 bp->rx_mode = rx_mode;
2438 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2439 }
2440
2441 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2442 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2443 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2444
Michael Chanc770a652005-08-25 15:38:39 -07002445 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002446}
2447
Michael Chanfba9fe92006-06-12 22:21:25 -07002448#define FW_BUF_SIZE 0x8000
2449
2450static int
2451bnx2_gunzip_init(struct bnx2 *bp)
2452{
2453 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2454 goto gunzip_nomem1;
2455
2456 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2457 goto gunzip_nomem2;
2458
2459 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2460 if (bp->strm->workspace == NULL)
2461 goto gunzip_nomem3;
2462
2463 return 0;
2464
2465gunzip_nomem3:
2466 kfree(bp->strm);
2467 bp->strm = NULL;
2468
2469gunzip_nomem2:
2470 vfree(bp->gunzip_buf);
2471 bp->gunzip_buf = NULL;
2472
2473gunzip_nomem1:
2474 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2475 				"decompression.\n", bp->dev->name);
2476 return -ENOMEM;
2477}
2478
2479static void
2480bnx2_gunzip_end(struct bnx2 *bp)
2481{
2482 kfree(bp->strm->workspace);
2483
2484 kfree(bp->strm);
2485 bp->strm = NULL;
2486
2487 if (bp->gunzip_buf) {
2488 vfree(bp->gunzip_buf);
2489 bp->gunzip_buf = NULL;
2490 }
2491}
2492
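/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf using
 * raw zlib inflate.  The 10-byte gzip header, plus an optional embedded
 * file name, is skipped before inflating.
 */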
2493static int
2494bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2495{
2496 int n, rc;
2497
2498 /* check gzip header */
2499 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2500 return -EINVAL;
2501
2502 n = 10;
2503
2504#define FNAME 0x8
2505 if (zbuf[3] & FNAME)
2506 while ((zbuf[n++] != 0) && (n < len));
2507
2508 bp->strm->next_in = zbuf + n;
2509 bp->strm->avail_in = len - n;
2510 bp->strm->next_out = bp->gunzip_buf;
2511 bp->strm->avail_out = FW_BUF_SIZE;
2512
2513 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2514 if (rc != Z_OK)
2515 return rc;
2516
2517 rc = zlib_inflate(bp->strm, Z_FINISH);
2518
2519 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2520 *outbuf = bp->gunzip_buf;
2521
2522 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2523 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2524 bp->dev->name, bp->strm->msg);
2525
2526 zlib_inflateEnd(bp->strm);
2527
2528 if (rc == Z_STREAM_END)
2529 return 0;
2530
2531 return rc;
2532}
2533
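/* Load RV2P processor code: write the instructions two 32-bit words at
 * a time through the INSTR_HIGH/INSTR_LOW registers, then reset the
 * selected processor (PROC1 or PROC2); it is un-stalled later.
 */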
Michael Chanb6016b72005-05-26 13:03:09 -07002534static void
2535load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2536 u32 rv2p_proc)
2537{
2538 int i;
2539 u32 val;
2540
2541
2542 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002543 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002544 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002545 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002546 rv2p_code++;
2547
2548 if (rv2p_proc == RV2P_PROC1) {
2549 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2550 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2551 }
2552 else {
2553 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2554 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2555 }
2556 }
2557
2558 /* Reset the processor, un-stall is done later. */
2559 if (rv2p_proc == RV2P_PROC1) {
2560 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2561 }
2562 else {
2563 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2564 }
2565}
2566
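/* Halt one of the on-chip RISC processors, copy its firmware sections
 * (text, data, sbss, bss, rodata) into the scratchpad, set the program
 * counter to the start address, and restart it.  The text section may
 * be gzip-compressed and is decompressed first.
 */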
Michael Chanaf3ee512006-11-19 14:09:25 -08002567static int
Michael Chanb6016b72005-05-26 13:03:09 -07002568load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2569{
2570 u32 offset;
2571 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002572 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002573
2574 /* Halt the CPU. */
2575 val = REG_RD_IND(bp, cpu_reg->mode);
2576 val |= cpu_reg->mode_value_halt;
2577 REG_WR_IND(bp, cpu_reg->mode, val);
2578 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2579
2580 /* Load the Text area. */
2581 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002582 if (fw->gz_text) {
2583 u32 text_len;
2584 void *text;
2585
2586 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2587 &text_len);
2588 if (rc)
2589 return rc;
2590
2591 fw->text = text;
2592 }
2593 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002594 int j;
2595
2596 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002597 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002598 }
2599 }
2600
2601 /* Load the Data area. */
2602 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2603 if (fw->data) {
2604 int j;
2605
2606 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2607 REG_WR_IND(bp, offset, fw->data[j]);
2608 }
2609 }
2610
2611 /* Load the SBSS area. */
2612 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2613 if (fw->sbss) {
2614 int j;
2615
2616 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2617 REG_WR_IND(bp, offset, fw->sbss[j]);
2618 }
2619 }
2620
2621 /* Load the BSS area. */
2622 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2623 if (fw->bss) {
2624 int j;
2625
2626 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2627 REG_WR_IND(bp, offset, fw->bss[j]);
2628 }
2629 }
2630
2631 /* Load the Read-Only area. */
2632 offset = cpu_reg->spad_base +
2633 (fw->rodata_addr - cpu_reg->mips_view_base);
2634 if (fw->rodata) {
2635 int j;
2636
2637 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2638 REG_WR_IND(bp, offset, fw->rodata[j]);
2639 }
2640 }
2641
2642 /* Clear the pre-fetch instruction. */
2643 REG_WR_IND(bp, cpu_reg->inst, 0);
2644 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2645
2646 /* Start the CPU. */
2647 val = REG_RD_IND(bp, cpu_reg->mode);
2648 val &= ~cpu_reg->mode_value_halt;
2649 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2650 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002651
2652 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002653}
2654
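/* Decompress and load all on-chip firmware: the two RV2P processors,
 * then the RX, TX, TX patch-up and completion processors, plus the
 * command processor on the 5709.  The 5709 uses the *_fw_09 images and
 * the other chips the *_fw_06 images.
 */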
Michael Chanfba9fe92006-06-12 22:21:25 -07002655static int
Michael Chanb6016b72005-05-26 13:03:09 -07002656bnx2_init_cpus(struct bnx2 *bp)
2657{
2658 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002659 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002660 int rc = 0;
2661 void *text;
2662 u32 text_len;
2663
2664 if ((rc = bnx2_gunzip_init(bp)) != 0)
2665 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002666
2667 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002668 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2669 &text_len);
2670 if (rc)
2671 goto init_cpu_err;
2672
2673 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2674
2675 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2676 &text_len);
2677 if (rc)
2678 goto init_cpu_err;
2679
2680 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002681
2682 /* Initialize the RX Processor. */
2683 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2684 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2685 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2686 cpu_reg.state = BNX2_RXP_CPU_STATE;
2687 cpu_reg.state_value_clear = 0xffffff;
2688 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2689 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2690 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2691 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2692 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2693 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2694 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002695
Michael Chand43584c2006-11-19 14:14:35 -08002696 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2697 fw = &bnx2_rxp_fw_09;
2698 else
2699 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002700
Michael Chanaf3ee512006-11-19 14:09:25 -08002701 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002702 if (rc)
2703 goto init_cpu_err;
2704
Michael Chanb6016b72005-05-26 13:03:09 -07002705 /* Initialize the TX Processor. */
2706 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2707 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2708 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2709 cpu_reg.state = BNX2_TXP_CPU_STATE;
2710 cpu_reg.state_value_clear = 0xffffff;
2711 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2712 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2713 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2714 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2715 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2716 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2717 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002718
Michael Chand43584c2006-11-19 14:14:35 -08002719 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2720 fw = &bnx2_txp_fw_09;
2721 else
2722 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002723
Michael Chanaf3ee512006-11-19 14:09:25 -08002724 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002725 if (rc)
2726 goto init_cpu_err;
2727
Michael Chanb6016b72005-05-26 13:03:09 -07002728 /* Initialize the TX Patch-up Processor. */
2729 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2730 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2731 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2732 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2733 cpu_reg.state_value_clear = 0xffffff;
2734 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2735 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2736 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2737 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2738 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2739 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2740 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002741
Michael Chand43584c2006-11-19 14:14:35 -08002742 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2743 fw = &bnx2_tpat_fw_09;
2744 else
2745 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002746
Michael Chanaf3ee512006-11-19 14:09:25 -08002747 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002748 if (rc)
2749 goto init_cpu_err;
2750
Michael Chanb6016b72005-05-26 13:03:09 -07002751 /* Initialize the Completion Processor. */
2752 cpu_reg.mode = BNX2_COM_CPU_MODE;
2753 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2754 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2755 cpu_reg.state = BNX2_COM_CPU_STATE;
2756 cpu_reg.state_value_clear = 0xffffff;
2757 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2758 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2759 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2760 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2761 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2762 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2763 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002764
Michael Chand43584c2006-11-19 14:14:35 -08002765 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2766 fw = &bnx2_com_fw_09;
2767 else
2768 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002769
Michael Chanaf3ee512006-11-19 14:09:25 -08002770 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002771 if (rc)
2772 goto init_cpu_err;
2773
Michael Chand43584c2006-11-19 14:14:35 -08002774 /* Initialize the Command Processor. */
2775 cpu_reg.mode = BNX2_CP_CPU_MODE;
2776 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2777 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2778 cpu_reg.state = BNX2_CP_CPU_STATE;
2779 cpu_reg.state_value_clear = 0xffffff;
2780 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2781 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2782 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2783 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2784 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2785 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2786 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002787
Michael Chand43584c2006-11-19 14:14:35 -08002788 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2789 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002790
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002791 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002792 if (rc)
2793 goto init_cpu_err;
2794 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002795init_cpu_err:
2796 bnx2_gunzip_end(bp);
2797 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002798}
2799
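/* Transition the device between D0 and D3hot.  For D3hot with WoL
 * enabled, the PHY is restricted to 10/100 autoneg, the MAC is set up
 * to receive magic and ACPI packets, and the firmware is told which
 * suspend mode to use before the PCI power state is changed.
 */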
2800static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002801bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002802{
2803 u16 pmcsr;
2804
2805 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2806
2807 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002808 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002809 u32 val;
2810
2811 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2812 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2813 PCI_PM_CTRL_PME_STATUS);
2814
2815 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2816 /* delay required during transition out of D3hot */
2817 msleep(20);
2818
2819 val = REG_RD(bp, BNX2_EMAC_MODE);
2820 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2821 val &= ~BNX2_EMAC_MODE_MPKT;
2822 REG_WR(bp, BNX2_EMAC_MODE, val);
2823
2824 val = REG_RD(bp, BNX2_RPM_CONFIG);
2825 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2826 REG_WR(bp, BNX2_RPM_CONFIG, val);
2827 break;
2828 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002829 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002830 int i;
2831 u32 val, wol_msg;
2832
2833 if (bp->wol) {
2834 u32 advertising;
2835 u8 autoneg;
2836
2837 autoneg = bp->autoneg;
2838 advertising = bp->advertising;
2839
2840 bp->autoneg = AUTONEG_SPEED;
2841 bp->advertising = ADVERTISED_10baseT_Half |
2842 ADVERTISED_10baseT_Full |
2843 ADVERTISED_100baseT_Half |
2844 ADVERTISED_100baseT_Full |
2845 ADVERTISED_Autoneg;
2846
2847 bnx2_setup_copper_phy(bp);
2848
2849 bp->autoneg = autoneg;
2850 bp->advertising = advertising;
2851
2852 bnx2_set_mac_addr(bp);
2853
2854 val = REG_RD(bp, BNX2_EMAC_MODE);
2855
2856 /* Enable port mode. */
2857 val &= ~BNX2_EMAC_MODE_PORT;
2858 val |= BNX2_EMAC_MODE_PORT_MII |
2859 BNX2_EMAC_MODE_MPKT_RCVD |
2860 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002861 BNX2_EMAC_MODE_MPKT;
2862
2863 REG_WR(bp, BNX2_EMAC_MODE, val);
2864
2865 /* receive all multicast */
2866 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2867 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2868 0xffffffff);
2869 }
2870 REG_WR(bp, BNX2_EMAC_RX_MODE,
2871 BNX2_EMAC_RX_MODE_SORT_MODE);
2872
2873 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2874 BNX2_RPM_SORT_USER0_MC_EN;
2875 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2876 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2877 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2878 BNX2_RPM_SORT_USER0_ENA);
2879
2880 /* Need to enable EMAC and RPM for WOL. */
2881 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2882 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2883 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2884 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2885
2886 val = REG_RD(bp, BNX2_RPM_CONFIG);
2887 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2888 REG_WR(bp, BNX2_RPM_CONFIG, val);
2889
2890 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2891 }
2892 else {
2893 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2894 }
2895
Michael Chandda1e392006-01-23 16:08:14 -08002896 if (!(bp->flags & NO_WOL_FLAG))
2897 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002898
2899 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2900 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2901 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2902
2903 if (bp->wol)
2904 pmcsr |= 3;
2905 }
2906 else {
2907 pmcsr |= 3;
2908 }
2909 if (bp->wol) {
2910 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2911 }
2912 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2913 pmcsr);
2914
2915 /* No more memory access after this point until
2916 * device is brought back to D0.
2917 */
2918 udelay(50);
2919 break;
2920 }
2921 default:
2922 return -EINVAL;
2923 }
2924 return 0;
2925}
2926
2927static int
2928bnx2_acquire_nvram_lock(struct bnx2 *bp)
2929{
2930 u32 val;
2931 int j;
2932
2933 /* Request access to the flash interface. */
2934 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2935 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2936 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2937 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2938 break;
2939
2940 udelay(5);
2941 }
2942
2943 if (j >= NVRAM_TIMEOUT_COUNT)
2944 return -EBUSY;
2945
2946 return 0;
2947}
2948
2949static int
2950bnx2_release_nvram_lock(struct bnx2 *bp)
2951{
2952 int j;
2953 u32 val;
2954
2955 /* Relinquish nvram interface. */
2956 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2957
2958 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2959 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2960 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2961 break;
2962
2963 udelay(5);
2964 }
2965
2966 if (j >= NVRAM_TIMEOUT_COUNT)
2967 return -EBUSY;
2968
2969 return 0;
2970}
2971
2972
2973static int
2974bnx2_enable_nvram_write(struct bnx2 *bp)
2975{
2976 u32 val;
2977
2978 val = REG_RD(bp, BNX2_MISC_CFG);
2979 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2980
2981 if (!bp->flash_info->buffered) {
2982 int j;
2983
2984 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2985 REG_WR(bp, BNX2_NVM_COMMAND,
2986 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2987
2988 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2989 udelay(5);
2990
2991 val = REG_RD(bp, BNX2_NVM_COMMAND);
2992 if (val & BNX2_NVM_COMMAND_DONE)
2993 break;
2994 }
2995
2996 if (j >= NVRAM_TIMEOUT_COUNT)
2997 return -EBUSY;
2998 }
2999 return 0;
3000}
3001
3002static void
3003bnx2_disable_nvram_write(struct bnx2 *bp)
3004{
3005 u32 val;
3006
3007 val = REG_RD(bp, BNX2_MISC_CFG);
3008 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3009}
3010
3011
3012static void
3013bnx2_enable_nvram_access(struct bnx2 *bp)
3014{
3015 u32 val;
3016
3017 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3018 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003019 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003020 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3021}
3022
3023static void
3024bnx2_disable_nvram_access(struct bnx2 *bp)
3025{
3026 u32 val;
3027
3028 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3029 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003030 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003031 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3032 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3033}
3034
3035static int
3036bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3037{
3038 u32 cmd;
3039 int j;
3040
3041 if (bp->flash_info->buffered)
3042 /* Buffered flash, no erase needed */
3043 return 0;
3044
3045 /* Build an erase command */
3046 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3047 BNX2_NVM_COMMAND_DOIT;
3048
3049 /* Need to clear DONE bit separately. */
3050 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3051
3052 	/* Address of the NVRAM to erase. */
3053 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3054
3055 /* Issue an erase command. */
3056 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3057
3058 /* Wait for completion. */
3059 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3060 u32 val;
3061
3062 udelay(5);
3063
3064 val = REG_RD(bp, BNX2_NVM_COMMAND);
3065 if (val & BNX2_NVM_COMMAND_DONE)
3066 break;
3067 }
3068
3069 if (j >= NVRAM_TIMEOUT_COUNT)
3070 return -EBUSY;
3071
3072 return 0;
3073}
3074
3075static int
3076bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3077{
3078 u32 cmd;
3079 int j;
3080
3081 /* Build the command word. */
3082 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3083
3084 /* Calculate an offset of a buffered flash. */
3085 if (bp->flash_info->buffered) {
3086 offset = ((offset / bp->flash_info->page_size) <<
3087 bp->flash_info->page_bits) +
3088 (offset % bp->flash_info->page_size);
3089 }
3090
3091 /* Need to clear DONE bit separately. */
3092 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3093
3094 /* Address of the NVRAM to read from. */
3095 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3096
3097 /* Issue a read command. */
3098 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3099
3100 /* Wait for completion. */
3101 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3102 u32 val;
3103
3104 udelay(5);
3105
3106 val = REG_RD(bp, BNX2_NVM_COMMAND);
3107 if (val & BNX2_NVM_COMMAND_DONE) {
3108 val = REG_RD(bp, BNX2_NVM_READ);
3109
3110 val = be32_to_cpu(val);
3111 memcpy(ret_val, &val, 4);
3112 break;
3113 }
3114 }
3115 if (j >= NVRAM_TIMEOUT_COUNT)
3116 return -EBUSY;
3117
3118 return 0;
3119}
3120
3121
3122static int
3123bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3124{
3125 u32 cmd, val32;
3126 int j;
3127
3128 /* Build the command word. */
3129 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3130
3131 /* Calculate an offset of a buffered flash. */
3132 if (bp->flash_info->buffered) {
3133 offset = ((offset / bp->flash_info->page_size) <<
3134 bp->flash_info->page_bits) +
3135 (offset % bp->flash_info->page_size);
3136 }
3137
3138 /* Need to clear DONE bit separately. */
3139 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3140
3141 memcpy(&val32, val, 4);
3142 val32 = cpu_to_be32(val32);
3143
3144 /* Write the data. */
3145 REG_WR(bp, BNX2_NVM_WRITE, val32);
3146
3147 /* Address of the NVRAM to write to. */
3148 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3149
3150 /* Issue the write command. */
3151 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3152
3153 /* Wait for completion. */
3154 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3155 udelay(5);
3156
3157 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3158 break;
3159 }
3160 if (j >= NVRAM_TIMEOUT_COUNT)
3161 return -EBUSY;
3162
3163 return 0;
3164}
3165
3166static int
3167bnx2_init_nvram(struct bnx2 *bp)
3168{
3169 u32 val;
3170 int j, entry_count, rc;
3171 struct flash_spec *flash;
3172
3173 /* Determine the selected interface. */
3174 val = REG_RD(bp, BNX2_NVM_CFG1);
3175
3176 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3177
3178 rc = 0;
3179 if (val & 0x40000000) {
3180
3181 /* Flash interface has been reconfigured */
3182 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003183 j++, flash++) {
3184 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3185 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003186 bp->flash_info = flash;
3187 break;
3188 }
3189 }
3190 }
3191 else {
Michael Chan37137702005-11-04 08:49:17 -08003192 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003193 /* Not yet been reconfigured */
3194
Michael Chan37137702005-11-04 08:49:17 -08003195 if (val & (1 << 23))
3196 mask = FLASH_BACKUP_STRAP_MASK;
3197 else
3198 mask = FLASH_STRAP_MASK;
3199
Michael Chanb6016b72005-05-26 13:03:09 -07003200 for (j = 0, flash = &flash_table[0]; j < entry_count;
3201 j++, flash++) {
3202
Michael Chan37137702005-11-04 08:49:17 -08003203 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003204 bp->flash_info = flash;
3205
3206 /* Request access to the flash interface. */
3207 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3208 return rc;
3209
3210 /* Enable access to flash interface */
3211 bnx2_enable_nvram_access(bp);
3212
3213 /* Reconfigure the flash interface */
3214 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3215 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3216 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3217 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3218
3219 /* Disable access to flash interface */
3220 bnx2_disable_nvram_access(bp);
3221 bnx2_release_nvram_lock(bp);
3222
3223 break;
3224 }
3225 }
3226 } /* if (val & 0x40000000) */
3227
3228 if (j == entry_count) {
3229 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003230 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003231 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003232 }
3233
Michael Chan1122db72006-01-23 16:11:42 -08003234 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3235 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3236 if (val)
3237 bp->flash_size = val;
3238 else
3239 bp->flash_size = bp->flash_info->total_size;
3240
Michael Chanb6016b72005-05-26 13:03:09 -07003241 return rc;
3242}
3243
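/* Read an arbitrary byte range from NVRAM.  The flash is accessed in
 * aligned 32-bit words, so partial words at the start and end of the
 * range are read into a scratch buffer and only the requested bytes are
 * copied out.
 */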
3244static int
3245bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3246 int buf_size)
3247{
3248 int rc = 0;
3249 u32 cmd_flags, offset32, len32, extra;
3250
3251 if (buf_size == 0)
3252 return 0;
3253
3254 /* Request access to the flash interface. */
3255 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3256 return rc;
3257
3258 /* Enable access to flash interface */
3259 bnx2_enable_nvram_access(bp);
3260
3261 len32 = buf_size;
3262 offset32 = offset;
3263 extra = 0;
3264
3265 cmd_flags = 0;
3266
3267 if (offset32 & 3) {
3268 u8 buf[4];
3269 u32 pre_len;
3270
3271 offset32 &= ~3;
3272 pre_len = 4 - (offset & 3);
3273
3274 if (pre_len >= len32) {
3275 pre_len = len32;
3276 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3277 BNX2_NVM_COMMAND_LAST;
3278 }
3279 else {
3280 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3281 }
3282
3283 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3284
3285 if (rc)
3286 return rc;
3287
3288 memcpy(ret_buf, buf + (offset & 3), pre_len);
3289
3290 offset32 += 4;
3291 ret_buf += pre_len;
3292 len32 -= pre_len;
3293 }
3294 if (len32 & 3) {
3295 extra = 4 - (len32 & 3);
3296 len32 = (len32 + 4) & ~3;
3297 }
3298
3299 if (len32 == 4) {
3300 u8 buf[4];
3301
3302 if (cmd_flags)
3303 cmd_flags = BNX2_NVM_COMMAND_LAST;
3304 else
3305 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3306 BNX2_NVM_COMMAND_LAST;
3307
3308 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3309
3310 memcpy(ret_buf, buf, 4 - extra);
3311 }
3312 else if (len32 > 0) {
3313 u8 buf[4];
3314
3315 /* Read the first word. */
3316 if (cmd_flags)
3317 cmd_flags = 0;
3318 else
3319 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3320
3321 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3322
3323 /* Advance to the next dword. */
3324 offset32 += 4;
3325 ret_buf += 4;
3326 len32 -= 4;
3327
3328 while (len32 > 4 && rc == 0) {
3329 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3330
3331 /* Advance to the next dword. */
3332 offset32 += 4;
3333 ret_buf += 4;
3334 len32 -= 4;
3335 }
3336
3337 if (rc)
3338 return rc;
3339
3340 cmd_flags = BNX2_NVM_COMMAND_LAST;
3341 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3342
3343 memcpy(ret_buf, buf, 4 - extra);
3344 }
3345
3346 /* Disable access to flash interface */
3347 bnx2_disable_nvram_access(bp);
3348
3349 bnx2_release_nvram_lock(bp);
3350
3351 return rc;
3352}
3353
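/* Write an arbitrary byte range to NVRAM.  Unaligned edges are handled
 * by reading back the surrounding words first; on non-buffered flash
 * each affected page is read into a scratch buffer, erased, and then
 * rewritten.
 */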
3354static int
3355bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3356 int buf_size)
3357{
3358 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003359 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003360 int rc = 0;
3361 int align_start, align_end;
3362
3363 buf = data_buf;
3364 offset32 = offset;
3365 len32 = buf_size;
3366 align_start = align_end = 0;
3367
3368 if ((align_start = (offset32 & 3))) {
3369 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003370 len32 += align_start;
3371 if (len32 < 4)
3372 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003373 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3374 return rc;
3375 }
3376
3377 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003378 align_end = 4 - (len32 & 3);
3379 len32 += align_end;
3380 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3381 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003382 }
3383
3384 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003385 align_buf = kmalloc(len32, GFP_KERNEL);
3386 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003387 return -ENOMEM;
3388 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003389 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003390 }
3391 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003392 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003393 }
Michael Chane6be7632007-01-08 19:56:13 -08003394 memcpy(align_buf + align_start, data_buf, buf_size);
3395 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003396 }
3397
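	/* Non-buffered flash parts are rewritten one full page at a time,
	 * so a scratch buffer big enough for one page is needed.
	 */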
Michael Chanae181bc2006-05-22 16:39:20 -07003398 if (bp->flash_info->buffered == 0) {
3399 flash_buffer = kmalloc(264, GFP_KERNEL);
3400 if (flash_buffer == NULL) {
3401 rc = -ENOMEM;
3402 goto nvram_write_end;
3403 }
3404 }
3405
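	/* Program the flash one page per iteration.  For non-buffered parts
	 * this is a read-modify-write cycle: read the whole page, erase it,
	 * then write back the preserved data around the new bytes.
	 */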
Michael Chanb6016b72005-05-26 13:03:09 -07003406 written = 0;
3407 while ((written < len32) && (rc == 0)) {
3408 u32 page_start, page_end, data_start, data_end;
3409 u32 addr, cmd_flags;
3410 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003411
3412 /* Find the page_start addr */
3413 page_start = offset32 + written;
3414 page_start -= (page_start % bp->flash_info->page_size);
3415 /* Find the page_end addr */
3416 page_end = page_start + bp->flash_info->page_size;
3417 /* Find the data_start addr */
3418 data_start = (written == 0) ? offset32 : page_start;
3419 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003420 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003421 (offset32 + len32) : page_end;
3422
3423 /* Request access to the flash interface. */
3424 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3425 goto nvram_write_end;
3426
3427 /* Enable access to flash interface */
3428 bnx2_enable_nvram_access(bp);
3429
3430 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3431 if (bp->flash_info->buffered == 0) {
3432 int j;
3433
3434 /* Read the whole page into the buffer
3435			 * (non-buffered flash only) */
3436 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3437 if (j == (bp->flash_info->page_size - 4)) {
3438 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3439 }
3440 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003441 page_start + j,
3442 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003443 cmd_flags);
3444
3445 if (rc)
3446 goto nvram_write_end;
3447
3448 cmd_flags = 0;
3449 }
3450 }
3451
3452 /* Enable writes to flash interface (unlock write-protect) */
3453 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3454 goto nvram_write_end;
3455
Michael Chanb6016b72005-05-26 13:03:09 -07003456 /* Loop to write back the buffer data from page_start to
3457 * data_start */
3458 i = 0;
3459 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003460 /* Erase the page */
3461 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3462 goto nvram_write_end;
3463
3464			/* Re-enable writes for the actual programming */
3465 bnx2_enable_nvram_write(bp);
3466
Michael Chanb6016b72005-05-26 13:03:09 -07003467 for (addr = page_start; addr < data_start;
3468 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003469
Michael Chanb6016b72005-05-26 13:03:09 -07003470 rc = bnx2_nvram_write_dword(bp, addr,
3471 &flash_buffer[i], cmd_flags);
3472
3473 if (rc != 0)
3474 goto nvram_write_end;
3475
3476 cmd_flags = 0;
3477 }
3478 }
3479
3480 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003481 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003482 if ((addr == page_end - 4) ||
3483 ((bp->flash_info->buffered) &&
3484 (addr == data_end - 4))) {
3485
3486 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3487 }
3488 rc = bnx2_nvram_write_dword(bp, addr, buf,
3489 cmd_flags);
3490
3491 if (rc != 0)
3492 goto nvram_write_end;
3493
3494 cmd_flags = 0;
3495 buf += 4;
3496 }
3497
3498 /* Loop to write back the buffer data from data_end
3499 * to page_end */
3500 if (bp->flash_info->buffered == 0) {
3501 for (addr = data_end; addr < page_end;
3502 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003503
Michael Chanb6016b72005-05-26 13:03:09 -07003504 if (addr == page_end-4) {
3505 cmd_flags = BNX2_NVM_COMMAND_LAST;
3506 }
3507 rc = bnx2_nvram_write_dword(bp, addr,
3508 &flash_buffer[i], cmd_flags);
3509
3510 if (rc != 0)
3511 goto nvram_write_end;
3512
3513 cmd_flags = 0;
3514 }
3515 }
3516
3517 /* Disable writes to flash interface (lock write-protect) */
3518 bnx2_disable_nvram_write(bp);
3519
3520 /* Disable access to flash interface */
3521 bnx2_disable_nvram_access(bp);
3522 bnx2_release_nvram_lock(bp);
3523
3524 /* Increment written */
3525 written += data_end - data_start;
3526 }
3527
3528nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003529 kfree(flash_buffer);
3530 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003531 return rc;
3532}
3533
3534static int
3535bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3536{
3537 u32 val;
3538 int i, rc = 0;
3539
3540 /* Wait for the current PCI transaction to complete before
3541 * issuing a reset. */
3542 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3543 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3544 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3545 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3546 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3547 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3548 udelay(5);
3549
Michael Chanb090ae22006-01-23 16:07:10 -08003550 /* Wait for the firmware to tell us it is ok to issue a reset. */
3551 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3552
Michael Chanb6016b72005-05-26 13:03:09 -07003553 /* Deposit a driver reset signature so the firmware knows that
3554 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003555 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003556 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3557
Michael Chanb6016b72005-05-26 13:03:09 -07003558	/* Do a dummy read to force the chip to complete all current transactions
3559 * before we issue a reset. */
3560 val = REG_RD(bp, BNX2_MISC_ID);
3561
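	/* The 5709 is reset through the MISC command register; older chips
	 * request a core reset through PCI config space and then poll for
	 * the reset bits to clear.
	 */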
Michael Chan234754d2006-11-19 14:11:41 -08003562 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3563 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3564 REG_RD(bp, BNX2_MISC_COMMAND);
3565 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003566
Michael Chan234754d2006-11-19 14:11:41 -08003567 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3568 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003569
Michael Chan234754d2006-11-19 14:11:41 -08003570 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003571
Michael Chan234754d2006-11-19 14:11:41 -08003572 } else {
3573 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3574 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3575 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3576
3577 /* Chip reset. */
3578 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3579
3580 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3581 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3582 current->state = TASK_UNINTERRUPTIBLE;
3583 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003584 }
Michael Chanb6016b72005-05-26 13:03:09 -07003585
Michael Chan234754d2006-11-19 14:11:41 -08003586		/* Reset takes approximately 30 usec */
3587 for (i = 0; i < 10; i++) {
3588 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3589 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3590 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3591 break;
3592 udelay(10);
3593 }
3594
3595 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3596 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3597 printk(KERN_ERR PFX "Chip reset did not complete\n");
3598 return -EBUSY;
3599 }
Michael Chanb6016b72005-05-26 13:03:09 -07003600 }
3601
3602 /* Make sure byte swapping is properly configured. */
3603 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3604 if (val != 0x01020304) {
3605 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3606 return -ENODEV;
3607 }
3608
Michael Chanb6016b72005-05-26 13:03:09 -07003609 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003610 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3611 if (rc)
3612 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003613
3614 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3615		/* Adjust the voltage regulator to two steps lower. The default
3616 * of this register is 0x0000000e. */
3617 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3618
3619 /* Remove bad rbuf memory from the free pool. */
3620 rc = bnx2_alloc_bad_rbuf(bp);
3621 }
3622
3623 return rc;
3624}
3625
3626static int
3627bnx2_init_chip(struct bnx2 *bp)
3628{
3629 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003630 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003631
3632 /* Make sure the interrupt is not active. */
3633 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3634
3635 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3636 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3637#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003638 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003639#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003640 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003641 DMA_READ_CHANS << 12 |
3642 DMA_WRITE_CHANS << 16;
3643
3644 val |= (0x2 << 20) | (1 << 11);
3645
Michael Chandda1e392006-01-23 16:08:14 -08003646 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003647 val |= (1 << 23);
3648
3649 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3650 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3651 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3652
3653 REG_WR(bp, BNX2_DMA_CONFIG, val);
3654
3655 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3656 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3657 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3658 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3659 }
3660
3661 if (bp->flags & PCIX_FLAG) {
3662 u16 val16;
3663
3664 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3665 &val16);
3666 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3667 val16 & ~PCI_X_CMD_ERO);
3668 }
3669
3670 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3671 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3672 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3673 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3674
3675 /* Initialize context mapping and zero out the quick contexts. The
3676 * context block must have already been enabled. */
Michael Chan59b47d82006-11-19 14:10:45 -08003677 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3678 bnx2_init_5709_context(bp);
3679 else
3680 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003681
Michael Chanfba9fe92006-06-12 22:21:25 -07003682 if ((rc = bnx2_init_cpus(bp)) != 0)
3683 return rc;
3684
Michael Chanb6016b72005-05-26 13:03:09 -07003685 bnx2_init_nvram(bp);
3686
3687 bnx2_set_mac_addr(bp);
3688
3689 val = REG_RD(bp, BNX2_MQ_CONFIG);
3690 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3691 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003692 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3693 val |= BNX2_MQ_CONFIG_HALT_DIS;
3694
Michael Chanb6016b72005-05-26 13:03:09 -07003695 REG_WR(bp, BNX2_MQ_CONFIG, val);
3696
3697 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3698 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3699 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3700
3701 val = (BCM_PAGE_BITS - 8) << 24;
3702 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3703
3704 /* Configure page size. */
3705 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3706 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3707 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3708 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3709
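	/* Derive the EMAC transmit backoff seed from the MAC address. */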
3710 val = bp->mac_addr[0] +
3711 (bp->mac_addr[1] << 8) +
3712 (bp->mac_addr[2] << 16) +
3713 bp->mac_addr[3] +
3714 (bp->mac_addr[4] << 8) +
3715 (bp->mac_addr[5] << 16);
3716 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3717
3718 /* Program the MTU. Also include 4 bytes for CRC32. */
3719 val = bp->dev->mtu + ETH_HLEN + 4;
3720 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3721 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3722 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3723
3724 bp->last_status_idx = 0;
3725 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3726
3727 /* Set up how to generate a link change interrupt. */
3728 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3729
3730 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3731 (u64) bp->status_blk_mapping & 0xffffffff);
3732 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3733
3734 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3735 (u64) bp->stats_blk_mapping & 0xffffffff);
3736 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3737 (u64) bp->stats_blk_mapping >> 32);
3738
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003739 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003740 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3741
3742 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3743 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3744
3745 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3746 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3747
3748 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3749
3750 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3751
3752 REG_WR(bp, BNX2_HC_COM_TICKS,
3753 (bp->com_ticks_int << 16) | bp->com_ticks);
3754
3755 REG_WR(bp, BNX2_HC_CMD_TICKS,
3756 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3757
3758 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3759 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3760
3761 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3762 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3763 else {
3764 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3765 BNX2_HC_CONFIG_TX_TMR_MODE |
3766 BNX2_HC_CONFIG_COLLECT_STATS);
3767 }
3768
3769 /* Clear internal stats counters. */
3770 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3771
Michael Chanda3e4fb2007-05-03 13:24:23 -07003772 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07003773
Michael Chane29054f2006-01-23 16:06:06 -08003774 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3775 BNX2_PORT_FEATURE_ASF_ENABLED)
3776 bp->flags |= ASF_ENABLE_FLAG;
3777
Michael Chanb6016b72005-05-26 13:03:09 -07003778 /* Initialize the receive filter. */
3779 bnx2_set_rx_mode(bp->dev);
3780
Michael Chanb090ae22006-01-23 16:07:10 -08003781 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3782 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003783
3784 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3785 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3786
3787 udelay(20);
3788
Michael Chanbf5295b2006-03-23 01:11:56 -08003789 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3790
Michael Chanb090ae22006-01-23 16:07:10 -08003791 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003792}
3793
Michael Chan59b47d82006-11-19 14:10:45 -08003794static void
3795bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3796{
3797 u32 val, offset0, offset1, offset2, offset3;
3798
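	/* The 5709 uses a different set of L2 context offsets (the _XI
	 * variants), so pick the proper set before programming the context.
	 */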
3799 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3800 offset0 = BNX2_L2CTX_TYPE_XI;
3801 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3802 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3803 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3804 } else {
3805 offset0 = BNX2_L2CTX_TYPE;
3806 offset1 = BNX2_L2CTX_CMD_TYPE;
3807 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3808 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3809 }
3810 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3811 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3812
3813 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3814 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3815
3816 val = (u64) bp->tx_desc_mapping >> 32;
3817 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3818
3819 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3820 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3821}
Michael Chanb6016b72005-05-26 13:03:09 -07003822
3823static void
3824bnx2_init_tx_ring(struct bnx2 *bp)
3825{
3826 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003827 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003828
Michael Chan2f8af122006-08-15 01:39:10 -07003829 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3830
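	/* The last BD of the TX ring is not used for data; it points back
	 * to the start of the ring so the chip can follow the chain.
	 */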
Michael Chanb6016b72005-05-26 13:03:09 -07003831 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003832
Michael Chanb6016b72005-05-26 13:03:09 -07003833 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3834 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3835
3836 bp->tx_prod = 0;
3837 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003838 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003839 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003840
Michael Chan59b47d82006-11-19 14:10:45 -08003841 cid = TX_CID;
3842 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3843 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003844
Michael Chan59b47d82006-11-19 14:10:45 -08003845 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003846}
3847
3848static void
3849bnx2_init_rx_ring(struct bnx2 *bp)
3850{
3851 struct rx_bd *rxbd;
3852 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003853 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003854 u32 val;
3855
3856 /* 8 for CRC and VLAN */
3857 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003858 /* hw alignment */
3859 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003860
3861 ring_prod = prod = bp->rx_prod = 0;
3862 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003863 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003864 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003865
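	/* The RX ring may span several pages.  The last BD of each page
	 * chains to the next page, and the last page chains back to the
	 * first, closing the ring.
	 */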
Michael Chan13daffa2006-03-20 17:49:20 -08003866 for (i = 0; i < bp->rx_max_ring; i++) {
3867 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003868
Michael Chan13daffa2006-03-20 17:49:20 -08003869 rxbd = &bp->rx_desc_ring[i][0];
3870 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3871 rxbd->rx_bd_len = bp->rx_buf_use_size;
3872 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3873 }
3874 if (i == (bp->rx_max_ring - 1))
3875 j = 0;
3876 else
3877 j = i + 1;
3878 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3879 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3880 0xffffffff;
3881 }
Michael Chanb6016b72005-05-26 13:03:09 -07003882
3883 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3884 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3885 val |= 0x02 << 8;
3886 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3887
Michael Chan13daffa2006-03-20 17:49:20 -08003888 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003889 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3890
Michael Chan13daffa2006-03-20 17:49:20 -08003891 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003892 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3893
Michael Chan236b6392006-03-20 17:49:02 -08003894 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003895 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3896 break;
3897 }
3898 prod = NEXT_RX_BD(prod);
3899 ring_prod = RX_RING_IDX(prod);
3900 }
3901 bp->rx_prod = prod;
3902
3903 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3904
3905 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3906}
3907
3908static void
Michael Chan13daffa2006-03-20 17:49:20 -08003909bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3910{
3911 u32 num_rings, max;
3912
3913 bp->rx_ring_size = size;
3914 num_rings = 1;
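	/* Count how many descriptor pages are needed to hold 'size' BDs. */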
3915 while (size > MAX_RX_DESC_CNT) {
3916 size -= MAX_RX_DESC_CNT;
3917 num_rings++;
3918 }
3919 /* round to next power of 2 */
3920 max = MAX_RX_RINGS;
3921 while ((max & num_rings) == 0)
3922 max >>= 1;
3923
3924 if (num_rings != max)
3925 max <<= 1;
3926
3927 bp->rx_max_ring = max;
3928 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3929}
3930
3931static void
Michael Chanb6016b72005-05-26 13:03:09 -07003932bnx2_free_tx_skbs(struct bnx2 *bp)
3933{
3934 int i;
3935
3936 if (bp->tx_buf_ring == NULL)
3937 return;
3938
3939 for (i = 0; i < TX_DESC_CNT; ) {
3940 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3941 struct sk_buff *skb = tx_buf->skb;
3942 int j, last;
3943
3944 if (skb == NULL) {
3945 i++;
3946 continue;
3947 }
3948
3949 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3950 skb_headlen(skb), PCI_DMA_TODEVICE);
3951
3952 tx_buf->skb = NULL;
3953
3954 last = skb_shinfo(skb)->nr_frags;
3955 for (j = 0; j < last; j++) {
3956 tx_buf = &bp->tx_buf_ring[i + j + 1];
3957 pci_unmap_page(bp->pdev,
3958 pci_unmap_addr(tx_buf, mapping),
3959 skb_shinfo(skb)->frags[j].size,
3960 PCI_DMA_TODEVICE);
3961 }
Michael Chan745720e2006-06-29 12:37:41 -07003962 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003963 i += j + 1;
3964 }
3965
3966}
3967
3968static void
3969bnx2_free_rx_skbs(struct bnx2 *bp)
3970{
3971 int i;
3972
3973 if (bp->rx_buf_ring == NULL)
3974 return;
3975
Michael Chan13daffa2006-03-20 17:49:20 -08003976 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003977 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3978 struct sk_buff *skb = rx_buf->skb;
3979
Michael Chan05d0f1c2005-11-04 08:53:48 -08003980 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003981 continue;
3982
3983 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3984 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3985
3986 rx_buf->skb = NULL;
3987
Michael Chan745720e2006-06-29 12:37:41 -07003988 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003989 }
3990}
3991
3992static void
3993bnx2_free_skbs(struct bnx2 *bp)
3994{
3995 bnx2_free_tx_skbs(bp);
3996 bnx2_free_rx_skbs(bp);
3997}
3998
3999static int
4000bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4001{
4002 int rc;
4003
4004 rc = bnx2_reset_chip(bp, reset_code);
4005 bnx2_free_skbs(bp);
4006 if (rc)
4007 return rc;
4008
Michael Chanfba9fe92006-06-12 22:21:25 -07004009 if ((rc = bnx2_init_chip(bp)) != 0)
4010 return rc;
4011
Michael Chanb6016b72005-05-26 13:03:09 -07004012 bnx2_init_tx_ring(bp);
4013 bnx2_init_rx_ring(bp);
4014 return 0;
4015}
4016
4017static int
4018bnx2_init_nic(struct bnx2 *bp)
4019{
4020 int rc;
4021
4022 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4023 return rc;
4024
Michael Chan80be4432006-11-19 14:07:28 -08004025 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004026 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004027 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004028 bnx2_set_link(bp);
4029 return 0;
4030}
4031
4032static int
4033bnx2_test_registers(struct bnx2 *bp)
4034{
4035 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004036 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004037 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004038 u16 offset;
4039 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004040#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004041 u32 rw_mask;
4042 u32 ro_mask;
4043 } reg_tbl[] = {
4044 { 0x006c, 0, 0x00000000, 0x0000003f },
4045 { 0x0090, 0, 0xffffffff, 0x00000000 },
4046 { 0x0094, 0, 0x00000000, 0x00000000 },
4047
Michael Chan5bae30c2007-05-03 13:18:46 -07004048 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4049 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4050 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4051 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4052 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4053 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4054 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4055 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4056 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004057
Michael Chan5bae30c2007-05-03 13:18:46 -07004058 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4059 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4060 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4061 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4062 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4063 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004064
Michael Chan5bae30c2007-05-03 13:18:46 -07004065 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4066 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4067 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004068
4069 { 0x1000, 0, 0x00000000, 0x00000001 },
4070 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004071
4072 { 0x1408, 0, 0x01c00800, 0x00000000 },
4073 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4074 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004075 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004076 { 0x14b0, 0, 0x00000002, 0x00000001 },
4077 { 0x14b8, 0, 0x00000000, 0x00000000 },
4078 { 0x14c0, 0, 0x00000000, 0x00000009 },
4079 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4080 { 0x14cc, 0, 0x00000000, 0x00000001 },
4081 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004082
4083 { 0x1800, 0, 0x00000000, 0x00000001 },
4084 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004085
4086 { 0x2800, 0, 0x00000000, 0x00000001 },
4087 { 0x2804, 0, 0x00000000, 0x00003f01 },
4088 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4089 { 0x2810, 0, 0xffff0000, 0x00000000 },
4090 { 0x2814, 0, 0xffff0000, 0x00000000 },
4091 { 0x2818, 0, 0xffff0000, 0x00000000 },
4092 { 0x281c, 0, 0xffff0000, 0x00000000 },
4093 { 0x2834, 0, 0xffffffff, 0x00000000 },
4094 { 0x2840, 0, 0x00000000, 0xffffffff },
4095 { 0x2844, 0, 0x00000000, 0xffffffff },
4096 { 0x2848, 0, 0xffffffff, 0x00000000 },
4097 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4098
4099 { 0x2c00, 0, 0x00000000, 0x00000011 },
4100 { 0x2c04, 0, 0x00000000, 0x00030007 },
4101
Michael Chanb6016b72005-05-26 13:03:09 -07004102 { 0x3c00, 0, 0x00000000, 0x00000001 },
4103 { 0x3c04, 0, 0x00000000, 0x00070000 },
4104 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4105 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4106 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4107 { 0x3c14, 0, 0x00000000, 0xffffffff },
4108 { 0x3c18, 0, 0x00000000, 0xffffffff },
4109 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4110 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004111
4112 { 0x5004, 0, 0x00000000, 0x0000007f },
4113 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004114
Michael Chanb6016b72005-05-26 13:03:09 -07004115 { 0x5c00, 0, 0x00000000, 0x00000001 },
4116 { 0x5c04, 0, 0x00000000, 0x0003000f },
4117 { 0x5c08, 0, 0x00000003, 0x00000000 },
4118 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4119 { 0x5c10, 0, 0x00000000, 0xffffffff },
4120 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4121 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4122 { 0x5c88, 0, 0x00000000, 0x00077373 },
4123 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4124
4125 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4126 { 0x680c, 0, 0xffffffff, 0x00000000 },
4127 { 0x6810, 0, 0xffffffff, 0x00000000 },
4128 { 0x6814, 0, 0xffffffff, 0x00000000 },
4129 { 0x6818, 0, 0xffffffff, 0x00000000 },
4130 { 0x681c, 0, 0xffffffff, 0x00000000 },
4131 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4132 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4133 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4134 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4135 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4136 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4137 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4138 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4139 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4140 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4141 { 0x684c, 0, 0xffffffff, 0x00000000 },
4142 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4143 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4144 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4145 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4146 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4147 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4148
4149 { 0xffff, 0, 0x00000000, 0x00000000 },
4150 };
4151
4152 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004153 is_5709 = 0;
4154 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4155 is_5709 = 1;
4156
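	/* For each register, write zero and then all ones, checking that
	 * the writable bits change and the read-only bits keep their value.
	 */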
Michael Chanb6016b72005-05-26 13:03:09 -07004157 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4158 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004159 u16 flags = reg_tbl[i].flags;
4160
4161 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4162 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004163
4164 offset = (u32) reg_tbl[i].offset;
4165 rw_mask = reg_tbl[i].rw_mask;
4166 ro_mask = reg_tbl[i].ro_mask;
4167
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004168 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004169
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004170 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004171
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004172 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004173 if ((val & rw_mask) != 0) {
4174 goto reg_test_err;
4175 }
4176
4177 if ((val & ro_mask) != (save_val & ro_mask)) {
4178 goto reg_test_err;
4179 }
4180
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004181 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004182
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004183 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004184 if ((val & rw_mask) != rw_mask) {
4185 goto reg_test_err;
4186 }
4187
4188 if ((val & ro_mask) != (save_val & ro_mask)) {
4189 goto reg_test_err;
4190 }
4191
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004192 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004193 continue;
4194
4195reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004196 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004197 ret = -ENODEV;
4198 break;
4199 }
4200 return ret;
4201}
4202
4203static int
4204bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4205{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004206 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004207 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4208 int i;
4209
4210 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4211 u32 offset;
4212
4213 for (offset = 0; offset < size; offset += 4) {
4214
4215 REG_WR_IND(bp, start + offset, test_pattern[i]);
4216
4217 if (REG_RD_IND(bp, start + offset) !=
4218 test_pattern[i]) {
4219 return -ENODEV;
4220 }
4221 }
4222 }
4223 return 0;
4224}
4225
4226static int
4227bnx2_test_memory(struct bnx2 *bp)
4228{
4229 int ret = 0;
4230 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004231 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004232 u32 offset;
4233 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004234 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004235 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004236 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004237 { 0xe0000, 0x4000 },
4238 { 0x120000, 0x4000 },
4239 { 0x1a0000, 0x4000 },
4240 { 0x160000, 0x4000 },
4241 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004242 },
4243 mem_tbl_5709[] = {
4244 { 0x60000, 0x4000 },
4245 { 0xa0000, 0x3000 },
4246 { 0xe0000, 0x4000 },
4247 { 0x120000, 0x4000 },
4248 { 0x1a0000, 0x4000 },
4249 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004250 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004251 struct mem_entry *mem_tbl;
4252
4253 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4254 mem_tbl = mem_tbl_5709;
4255 else
4256 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004257
4258 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4259 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4260 mem_tbl[i].len)) != 0) {
4261 return ret;
4262 }
4263 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004264
Michael Chanb6016b72005-05-26 13:03:09 -07004265 return ret;
4266}
4267
Michael Chanbc5a0692006-01-23 16:13:22 -08004268#define BNX2_MAC_LOOPBACK 0
4269#define BNX2_PHY_LOOPBACK 1
4270
Michael Chanb6016b72005-05-26 13:03:09 -07004271static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004272bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004273{
4274 unsigned int pkt_size, num_pkts, i;
4275 struct sk_buff *skb, *rx_skb;
4276 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004277 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004278 dma_addr_t map;
4279 struct tx_bd *txbd;
4280 struct sw_bd *rx_buf;
4281 struct l2_fhdr *rx_hdr;
4282 int ret = -ENODEV;
4283
Michael Chanbc5a0692006-01-23 16:13:22 -08004284 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4285 bp->loopback = MAC_LOOPBACK;
4286 bnx2_set_mac_loopback(bp);
4287 }
4288 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004289 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004290 bnx2_set_phy_loopback(bp);
4291 }
4292 else
4293 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004294
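	/* Build a test frame addressed to our own MAC, queue it on the TX
	 * ring, force the coalescing block to run, then verify the frame
	 * comes back on the RX ring with its payload intact.
	 */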
4295 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004296 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004297 if (!skb)
4298 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004299 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004300 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004301 memset(packet + 6, 0x0, 8);
4302 for (i = 14; i < pkt_size; i++)
4303 packet[i] = (unsigned char) (i & 0xff);
4304
4305 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4306 PCI_DMA_TODEVICE);
4307
Michael Chanbf5295b2006-03-23 01:11:56 -08004308 REG_WR(bp, BNX2_HC_COMMAND,
4309 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4310
Michael Chanb6016b72005-05-26 13:03:09 -07004311 REG_RD(bp, BNX2_HC_COMMAND);
4312
4313 udelay(5);
4314 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4315
Michael Chanb6016b72005-05-26 13:03:09 -07004316 num_pkts = 0;
4317
Michael Chanbc5a0692006-01-23 16:13:22 -08004318 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004319
4320 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4321 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4322 txbd->tx_bd_mss_nbytes = pkt_size;
4323 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4324
4325 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004326 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4327 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004328
Michael Chan234754d2006-11-19 14:11:41 -08004329 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4330 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004331
4332 udelay(100);
4333
Michael Chanbf5295b2006-03-23 01:11:56 -08004334 REG_WR(bp, BNX2_HC_COMMAND,
4335 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4336
Michael Chanb6016b72005-05-26 13:03:09 -07004337 REG_RD(bp, BNX2_HC_COMMAND);
4338
4339 udelay(5);
4340
4341 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004342 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004343
Michael Chanbc5a0692006-01-23 16:13:22 -08004344 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004345 goto loopback_test_done;
4346 }
4347
4348 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4349 if (rx_idx != rx_start_idx + num_pkts) {
4350 goto loopback_test_done;
4351 }
4352
4353 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4354 rx_skb = rx_buf->skb;
4355
4356 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4357 skb_reserve(rx_skb, bp->rx_offset);
4358
4359 pci_dma_sync_single_for_cpu(bp->pdev,
4360 pci_unmap_addr(rx_buf, mapping),
4361 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4362
Michael Chanade2bfe2006-01-23 16:09:51 -08004363 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004364 (L2_FHDR_ERRORS_BAD_CRC |
4365 L2_FHDR_ERRORS_PHY_DECODE |
4366 L2_FHDR_ERRORS_ALIGNMENT |
4367 L2_FHDR_ERRORS_TOO_SHORT |
4368 L2_FHDR_ERRORS_GIANT_FRAME)) {
4369
4370 goto loopback_test_done;
4371 }
4372
4373 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4374 goto loopback_test_done;
4375 }
4376
4377 for (i = 14; i < pkt_size; i++) {
4378 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4379 goto loopback_test_done;
4380 }
4381 }
4382
4383 ret = 0;
4384
4385loopback_test_done:
4386 bp->loopback = 0;
4387 return ret;
4388}
4389
Michael Chanbc5a0692006-01-23 16:13:22 -08004390#define BNX2_MAC_LOOPBACK_FAILED 1
4391#define BNX2_PHY_LOOPBACK_FAILED 2
4392#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4393 BNX2_PHY_LOOPBACK_FAILED)
4394
4395static int
4396bnx2_test_loopback(struct bnx2 *bp)
4397{
4398 int rc = 0;
4399
4400 if (!netif_running(bp->dev))
4401 return BNX2_LOOPBACK_FAILED;
4402
4403 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4404 spin_lock_bh(&bp->phy_lock);
4405 bnx2_init_phy(bp);
4406 spin_unlock_bh(&bp->phy_lock);
4407 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4408 rc |= BNX2_MAC_LOOPBACK_FAILED;
4409 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4410 rc |= BNX2_PHY_LOOPBACK_FAILED;
4411 return rc;
4412}
4413
Michael Chanb6016b72005-05-26 13:03:09 -07004414#define NVRAM_SIZE 0x200
4415#define CRC32_RESIDUAL 0xdebb20e3
4416
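/* Sanity check the NVRAM contents: verify the magic value at offset 0,
 * then CRC each 0x100-byte half of the block at offset 0x100.  A block
 * that includes its stored CRC should yield the CRC32 residual.
 */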
4417static int
4418bnx2_test_nvram(struct bnx2 *bp)
4419{
4420 u32 buf[NVRAM_SIZE / 4];
4421 u8 *data = (u8 *) buf;
4422 int rc = 0;
4423 u32 magic, csum;
4424
4425 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4426 goto test_nvram_done;
4427
4428 magic = be32_to_cpu(buf[0]);
4429 if (magic != 0x669955aa) {
4430 rc = -ENODEV;
4431 goto test_nvram_done;
4432 }
4433
4434 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4435 goto test_nvram_done;
4436
4437 csum = ether_crc_le(0x100, data);
4438 if (csum != CRC32_RESIDUAL) {
4439 rc = -ENODEV;
4440 goto test_nvram_done;
4441 }
4442
4443 csum = ether_crc_le(0x100, data + 0x100);
4444 if (csum != CRC32_RESIDUAL) {
4445 rc = -ENODEV;
4446 }
4447
4448test_nvram_done:
4449 return rc;
4450}
4451
4452static int
4453bnx2_test_link(struct bnx2 *bp)
4454{
4455 u32 bmsr;
4456
Michael Chanc770a652005-08-25 15:38:39 -07004457 spin_lock_bh(&bp->phy_lock);
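	/* The BMSR link bit is latched low, so read it twice to get the
	 * current link state.
	 */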
Michael Chan27a005b2007-05-03 13:23:41 -07004458 bnx2_enable_bmsr1(bp);
4459 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4460 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4461 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004462 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004463
Michael Chanb6016b72005-05-26 13:03:09 -07004464 if (bmsr & BMSR_LSTATUS) {
4465 return 0;
4466 }
4467 return -ENODEV;
4468}
4469
4470static int
4471bnx2_test_intr(struct bnx2 *bp)
4472{
4473 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004474 u16 status_idx;
4475
4476 if (!netif_running(bp->dev))
4477 return -ENODEV;
4478
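	/* Force the host coalescing block to generate an interrupt and
	 * poll for the status index in the ack register to advance.
	 */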
4479 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4480
4481 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004482 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004483 REG_RD(bp, BNX2_HC_COMMAND);
4484
4485 for (i = 0; i < 10; i++) {
4486 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4487 status_idx) {
4488
4489 break;
4490 }
4491
4492 msleep_interruptible(10);
4493 }
4494 if (i < 10)
4495 return 0;
4496
4497 return -ENODEV;
4498}
4499
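/* Periodic SerDes check for the 5706: if autoneg has not completed but
 * the PHY reports signal detect with no config words from the link
 * partner, fall back to forced 1 Gbps full duplex (parallel detect);
 * re-enable autoneg once config words are seen again.
 */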
4500static void
Michael Chan48b01e22006-11-19 14:08:00 -08004501bnx2_5706_serdes_timer(struct bnx2 *bp)
4502{
4503 spin_lock(&bp->phy_lock);
4504 if (bp->serdes_an_pending)
4505 bp->serdes_an_pending--;
4506 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4507 u32 bmcr;
4508
4509 bp->current_interval = bp->timer_interval;
4510
Michael Chanca58c3a2007-05-03 13:22:52 -07004511 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004512
4513 if (bmcr & BMCR_ANENABLE) {
4514 u32 phy1, phy2;
4515
4516 bnx2_write_phy(bp, 0x1c, 0x7c00);
4517 bnx2_read_phy(bp, 0x1c, &phy1);
4518
4519 bnx2_write_phy(bp, 0x17, 0x0f01);
4520 bnx2_read_phy(bp, 0x15, &phy2);
4521 bnx2_write_phy(bp, 0x17, 0x0f01);
4522 bnx2_read_phy(bp, 0x15, &phy2);
4523
4524 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4525 !(phy2 & 0x20)) { /* no CONFIG */
4526
4527 bmcr &= ~BMCR_ANENABLE;
4528 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004529 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004530 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4531 }
4532 }
4533 }
4534 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4535 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4536 u32 phy2;
4537
4538 bnx2_write_phy(bp, 0x17, 0x0f01);
4539 bnx2_read_phy(bp, 0x15, &phy2);
4540 if (phy2 & 0x20) {
4541 u32 bmcr;
4542
Michael Chanca58c3a2007-05-03 13:22:52 -07004543 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004544 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004545 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004546
4547 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4548 }
4549 } else
4550 bp->current_interval = bp->timer_interval;
4551
4552 spin_unlock(&bp->phy_lock);
4553}
4554
4555static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004556bnx2_5708_serdes_timer(struct bnx2 *bp)
4557{
4558 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4559 bp->serdes_an_pending = 0;
4560 return;
4561 }
4562
4563 spin_lock(&bp->phy_lock);
4564 if (bp->serdes_an_pending)
4565 bp->serdes_an_pending--;
4566 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4567 u32 bmcr;
4568
Michael Chanca58c3a2007-05-03 13:22:52 -07004569 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004570 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004571 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004572 bp->current_interval = SERDES_FORCED_TIMEOUT;
4573 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004574 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004575 bp->serdes_an_pending = 2;
4576 bp->current_interval = bp->timer_interval;
4577 }
4578
4579 } else
4580 bp->current_interval = bp->timer_interval;
4581
4582 spin_unlock(&bp->phy_lock);
4583}
4584
4585static void
Michael Chanb6016b72005-05-26 13:03:09 -07004586bnx2_timer(unsigned long data)
4587{
4588 struct bnx2 *bp = (struct bnx2 *) data;
4589 u32 msg;
4590
Michael Chancd339a02005-08-25 15:35:24 -07004591 if (!netif_running(bp->dev))
4592 return;
4593
Michael Chanb6016b72005-05-26 13:03:09 -07004594 if (atomic_read(&bp->intr_sem) != 0)
4595 goto bnx2_restart_timer;
4596
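	/* Send the periodic driver pulse so the firmware knows the driver
	 * is still alive.
	 */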
4597 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004598 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004599
Michael Chancea94db2006-06-12 22:16:13 -07004600 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4601
Michael Chanf8dd0642006-11-19 14:08:29 -08004602 if (bp->phy_flags & PHY_SERDES_FLAG) {
4603 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4604 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004605 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004606 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004607 }
4608
4609bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004610 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004611}
4612
4613/* Called with rtnl_lock */
4614static int
4615bnx2_open(struct net_device *dev)
4616{
Michael Chan972ec0d2006-01-23 16:12:43 -08004617 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004618 int rc;
4619
Michael Chan1b2f9222007-05-03 13:20:19 -07004620 netif_carrier_off(dev);
4621
Pavel Machek829ca9a2005-09-03 15:56:56 -07004622 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004623 bnx2_disable_int(bp);
4624
4625 rc = bnx2_alloc_mem(bp);
4626 if (rc)
4627 return rc;
4628
4629 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4630 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4631 !disable_msi) {
4632
4633 if (pci_enable_msi(bp->pdev) == 0) {
4634 bp->flags |= USING_MSI_FLAG;
4635 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4636 dev);
4637 }
4638 else {
4639 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004640 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004641 }
4642 }
4643 else {
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004644 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
Michael Chanb6016b72005-05-26 13:03:09 -07004645 dev->name, dev);
4646 }
4647 if (rc) {
4648 bnx2_free_mem(bp);
4649 return rc;
4650 }
4651
4652 rc = bnx2_init_nic(bp);
4653
4654 if (rc) {
4655 free_irq(bp->pdev->irq, dev);
4656 if (bp->flags & USING_MSI_FLAG) {
4657 pci_disable_msi(bp->pdev);
4658 bp->flags &= ~USING_MSI_FLAG;
4659 }
4660 bnx2_free_skbs(bp);
4661 bnx2_free_mem(bp);
4662 return rc;
4663 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004664
Michael Chancd339a02005-08-25 15:35:24 -07004665 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004666
4667 atomic_set(&bp->intr_sem, 0);
4668
4669 bnx2_enable_int(bp);
4670
4671 if (bp->flags & USING_MSI_FLAG) {
4672 /* Test MSI to make sure it is working
4673 * If MSI test fails, go back to INTx mode
4674 */
4675 if (bnx2_test_intr(bp) != 0) {
4676 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4677 " using MSI, switching to INTx mode. Please"
4678 " report this failure to the PCI maintainer"
4679 " and include system chipset information.\n",
4680 bp->dev->name);
4681
4682 bnx2_disable_int(bp);
4683 free_irq(bp->pdev->irq, dev);
4684 pci_disable_msi(bp->pdev);
4685 bp->flags &= ~USING_MSI_FLAG;
4686
4687 rc = bnx2_init_nic(bp);
4688
4689 if (!rc) {
4690 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07004691 IRQF_SHARED, dev->name, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004692 }
4693 if (rc) {
4694 bnx2_free_skbs(bp);
4695 bnx2_free_mem(bp);
4696 del_timer_sync(&bp->timer);
4697 return rc;
4698 }
4699 bnx2_enable_int(bp);
4700 }
4701 }
4702 if (bp->flags & USING_MSI_FLAG) {
4703 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4704 }
4705
4706 netif_start_queue(dev);
4707
4708 return 0;
4709}
4710
4711static void
David Howellsc4028952006-11-22 14:57:56 +00004712bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004713{
David Howellsc4028952006-11-22 14:57:56 +00004714 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004715
Michael Chanafdc08b2005-08-25 15:34:29 -07004716 if (!netif_running(bp->dev))
4717 return;
4718
4719 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004720 bnx2_netif_stop(bp);
4721
4722 bnx2_init_nic(bp);
4723
4724 atomic_set(&bp->intr_sem, 1);
4725 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004726 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004727}
4728
4729static void
4730bnx2_tx_timeout(struct net_device *dev)
4731{
Michael Chan972ec0d2006-01-23 16:12:43 -08004732 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004733
4734	/* This allows the netif to be shut down gracefully before resetting */
4735 schedule_work(&bp->reset_task);
4736}
4737
4738#ifdef BCM_VLAN
4739/* Called with rtnl_lock */
4740static void
4741bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4742{
Michael Chan972ec0d2006-01-23 16:12:43 -08004743 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004744
4745 bnx2_netif_stop(bp);
4746
4747 bp->vlgrp = vlgrp;
4748 bnx2_set_rx_mode(dev);
4749
4750 bnx2_netif_start(bp);
4751}
4752
4753/* Called with rtnl_lock */
4754static void
4755bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4756{
Michael Chan972ec0d2006-01-23 16:12:43 -08004757 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004758
4759 bnx2_netif_stop(bp);
Dan Aloni5c15bde2007-03-02 20:44:51 -08004760 vlan_group_set_device(bp->vlgrp, vid, NULL);
Michael Chanb6016b72005-05-26 13:03:09 -07004761 bnx2_set_rx_mode(dev);
4762
4763 bnx2_netif_start(bp);
4764}
4765#endif
4766
Herbert Xu932ff272006-06-09 12:20:56 -07004767/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004768 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4769 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004770 */
4771static int
4772bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4773{
Michael Chan972ec0d2006-01-23 16:12:43 -08004774 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004775 dma_addr_t mapping;
4776 struct tx_bd *txbd;
4777 struct sw_bd *tx_buf;
4778 u32 len, vlan_tag_flags, last_frag, mss;
4779 u16 prod, ring_prod;
4780 int i;
4781
Michael Chane89bbf12005-08-25 15:36:58 -07004782 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004783 netif_stop_queue(dev);
4784 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4785 dev->name);
4786
4787 return NETDEV_TX_BUSY;
4788 }
4789 len = skb_headlen(skb);
4790 prod = bp->tx_prod;
4791 ring_prod = TX_RING_IDX(prod);
4792
4793 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004794 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004795 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4796 }
4797
4798 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4799 vlan_tag_flags |=
4800 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4801 }
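	/* Set up LSO: for IPv6 the TCP header offset is encoded into the BD
	 * flags; for IPv4 the IP header and TCP pseudo-header checksums are
	 * primed so the hardware can complete them per segment.
	 */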
Herbert Xu79671682006-06-22 02:40:14 -07004802 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004803 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4804 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004805 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004806
Michael Chanb6016b72005-05-26 13:03:09 -07004807 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4808
Michael Chan4666f872007-05-03 13:22:28 -07004809 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004810
Michael Chan4666f872007-05-03 13:22:28 -07004811 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4812 u32 tcp_off = skb_transport_offset(skb) -
4813 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004814
Michael Chan4666f872007-05-03 13:22:28 -07004815 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4816 TX_BD_FLAGS_SW_FLAGS;
4817 if (likely(tcp_off == 0))
4818 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4819 else {
4820 tcp_off >>= 3;
4821 vlan_tag_flags |= ((tcp_off & 0x3) <<
4822 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4823 ((tcp_off & 0x10) <<
4824 TX_BD_FLAGS_TCP6_OFF4_SHL);
4825 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4826 }
4827 } else {
4828 if (skb_header_cloned(skb) &&
4829 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4830 dev_kfree_skb(skb);
4831 return NETDEV_TX_OK;
4832 }
4833
4834 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4835
4836 iph = ip_hdr(skb);
4837 iph->check = 0;
4838 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4839 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4840 iph->daddr, 0,
4841 IPPROTO_TCP,
4842 0);
4843 if (tcp_opt_len || (iph->ihl > 5)) {
4844 vlan_tag_flags |= ((iph->ihl - 5) +
4845 (tcp_opt_len >> 2)) << 8;
4846 }
Michael Chanb6016b72005-05-26 13:03:09 -07004847 }
Michael Chan4666f872007-05-03 13:22:28 -07004848 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004849 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004850
4851 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004852
Michael Chanb6016b72005-05-26 13:03:09 -07004853 tx_buf = &bp->tx_buf_ring[ring_prod];
4854 tx_buf->skb = skb;
4855 pci_unmap_addr_set(tx_buf, mapping, mapping);
4856
4857 txbd = &bp->tx_desc_ring[ring_prod];
4858
4859 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4860 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4861 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4862 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4863
4864 last_frag = skb_shinfo(skb)->nr_frags;
4865
4866 for (i = 0; i < last_frag; i++) {
4867 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4868
4869 prod = NEXT_TX_BD(prod);
4870 ring_prod = TX_RING_IDX(prod);
4871 txbd = &bp->tx_desc_ring[ring_prod];
4872
4873 len = frag->size;
4874 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4875 len, PCI_DMA_TODEVICE);
4876 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4877 mapping, mapping);
4878
4879 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4880 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4881 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4882 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4883
4884 }
4885 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4886
4887 prod = NEXT_TX_BD(prod);
4888 bp->tx_prod_bseq += skb->len;
4889
Michael Chan234754d2006-11-19 14:11:41 -08004890 REG_WR16(bp, bp->tx_bidx_addr, prod);
4891 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004892
4893 mmiowb();
4894
4895 bp->tx_prod = prod;
4896 dev->trans_start = jiffies;
4897
Michael Chane89bbf12005-08-25 15:36:58 -07004898 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004899 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004900 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004901 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004902 }
4903
4904 return NETDEV_TX_OK;
4905}
4906
4907/* Called with rtnl_lock */
4908static int
4909bnx2_close(struct net_device *dev)
4910{
Michael Chan972ec0d2006-01-23 16:12:43 -08004911 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004912 u32 reset_code;
4913
Michael Chanafdc08b2005-08-25 15:34:29 -07004914 /* Calling flush_scheduled_work() may deadlock because
4915 * linkwatch_event() may be on the workqueue and it will try to get
4916 * the rtnl_lock which we are holding.
4917 */
4918 while (bp->in_reset_task)
4919 msleep(1);
4920
Michael Chanb6016b72005-05-26 13:03:09 -07004921 bnx2_netif_stop(bp);
4922 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004923 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004924 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004925 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004926 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4927 else
4928 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4929 bnx2_reset_chip(bp, reset_code);
4930 free_irq(bp->pdev->irq, dev);
4931 if (bp->flags & USING_MSI_FLAG) {
4932 pci_disable_msi(bp->pdev);
4933 bp->flags &= ~USING_MSI_FLAG;
4934 }
4935 bnx2_free_skbs(bp);
4936 bnx2_free_mem(bp);
4937 bp->link_up = 0;
4938 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004939 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004940 return 0;
4941}
4942
4943#define GET_NET_STATS64(ctr) \
4944 ((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4945 (unsigned long) (ctr##_lo))
4946
4947#define GET_NET_STATS32(ctr) \
4948 (ctr##_lo)
4949
4950#if (BITS_PER_LONG == 64)
4951#define GET_NET_STATS GET_NET_STATS64
4952#else
4953#define GET_NET_STATS GET_NET_STATS32
4954#endif
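/* Note on the GET_NET_STATS macros above: the hardware keeps each MIB
 * counter as a hi/lo pair of 32-bit words.  On 64-bit hosts the pair is
 * folded into one unsigned long, e.g. GET_NET_STATS(stat_IfHCInOctets)
 * expands to roughly ((unsigned long) stat_IfHCInOctets_hi << 32) +
 * stat_IfHCInOctets_lo; on 32-bit hosts only the low word is reported,
 * so the values wrap at 2^32.
 */
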
4955
4956static struct net_device_stats *
4957bnx2_get_stats(struct net_device *dev)
4958{
Michael Chan972ec0d2006-01-23 16:12:43 -08004959 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004960 struct statistics_block *stats_blk = bp->stats_blk;
4961 struct net_device_stats *net_stats = &bp->net_stats;
4962
4963 if (bp->stats_blk == NULL) {
4964 return net_stats;
4965 }
4966 net_stats->rx_packets =
4967 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4968 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4969 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4970
4971 net_stats->tx_packets =
4972 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4973 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4974 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4975
4976 net_stats->rx_bytes =
4977 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4978
4979 net_stats->tx_bytes =
4980 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4981
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004982 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07004983 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4984
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004985 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07004986 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4987
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004988 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004989 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4990 stats_blk->stat_EtherStatsOverrsizePkts);
4991
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004992 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004993 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4994
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004995 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004996 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4997
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004998 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07004999 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5000
5001 net_stats->rx_errors = net_stats->rx_length_errors +
5002 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5003 net_stats->rx_crc_errors;
5004
5005 net_stats->tx_aborted_errors =
5006 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5007 stats_blk->stat_Dot3StatsLateCollisions);
5008
Michael Chan5b0c76a2005-11-04 08:45:49 -08005009 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5010 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005011 net_stats->tx_carrier_errors = 0;
5012 else {
5013 net_stats->tx_carrier_errors =
5014 (unsigned long)
5015 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5016 }
5017
5018 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005019 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005020 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5021 +
5022 net_stats->tx_aborted_errors +
5023 net_stats->tx_carrier_errors;
5024
Michael Chancea94db2006-06-12 22:16:13 -07005025 net_stats->rx_missed_errors =
5026 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5027 stats_blk->stat_FwRxDrop);
5028
Michael Chanb6016b72005-05-26 13:03:09 -07005029 return net_stats;
5030}
5031
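/* Note on bnx2_get_stats() above: the netdev counters are derived from
 * the hardware MIB block.  rx_errors sums the length, overrun, alignment
 * and CRC error counters, while tx_errors adds the internal MAC transmit
 * errors to the aborted and carrier counters.  tx_carrier_errors is
 * forced to zero on 5706 and 5708 A0 parts (apparently the same errata
 * that skips this counter in the ethtool stats tables further below),
 * and rx_missed_errors folds in frames dropped by the on-chip firmware
 * (stat_FwRxDrop).
 */
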
5032/* All ethtool functions called with rtnl_lock */
5033
5034static int
5035bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5036{
Michael Chan972ec0d2006-01-23 16:12:43 -08005037 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005038
5039 cmd->supported = SUPPORTED_Autoneg;
5040 if (bp->phy_flags & PHY_SERDES_FLAG) {
5041 cmd->supported |= SUPPORTED_1000baseT_Full |
5042 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005043 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5044 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005045
5046 cmd->port = PORT_FIBRE;
5047 }
5048 else {
5049 cmd->supported |= SUPPORTED_10baseT_Half |
5050 SUPPORTED_10baseT_Full |
5051 SUPPORTED_100baseT_Half |
5052 SUPPORTED_100baseT_Full |
5053 SUPPORTED_1000baseT_Full |
5054 SUPPORTED_TP;
5055
5056 cmd->port = PORT_TP;
5057 }
5058
5059 cmd->advertising = bp->advertising;
5060
5061 if (bp->autoneg & AUTONEG_SPEED) {
5062 cmd->autoneg = AUTONEG_ENABLE;
5063 }
5064 else {
5065 cmd->autoneg = AUTONEG_DISABLE;
5066 }
5067
5068 if (netif_carrier_ok(dev)) {
5069 cmd->speed = bp->line_speed;
5070 cmd->duplex = bp->duplex;
5071 }
5072 else {
5073 cmd->speed = -1;
5074 cmd->duplex = -1;
5075 }
5076
5077 cmd->transceiver = XCVR_INTERNAL;
5078 cmd->phy_address = bp->phy_addr;
5079
5080 return 0;
5081}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005082
Michael Chanb6016b72005-05-26 13:03:09 -07005083static int
5084bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5085{
Michael Chan972ec0d2006-01-23 16:12:43 -08005086 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005087 u8 autoneg = bp->autoneg;
5088 u8 req_duplex = bp->req_duplex;
5089 u16 req_line_speed = bp->req_line_speed;
5090 u32 advertising = bp->advertising;
5091
5092 if (cmd->autoneg == AUTONEG_ENABLE) {
5093 autoneg |= AUTONEG_SPEED;
5094
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005095 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005096
5097 /* allow advertising 1 speed */
5098 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5099 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5100 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5101 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5102
5103 if (bp->phy_flags & PHY_SERDES_FLAG)
5104 return -EINVAL;
5105
5106 advertising = cmd->advertising;
5107
Michael Chan27a005b2007-05-03 13:23:41 -07005108 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5109 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5110 return -EINVAL;
5111 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005112 advertising = cmd->advertising;
5113 }
5114 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5115 return -EINVAL;
5116 }
5117 else {
5118 if (bp->phy_flags & PHY_SERDES_FLAG) {
5119 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5120 }
5121 else {
5122 advertising = ETHTOOL_ALL_COPPER_SPEED;
5123 }
5124 }
5125 advertising |= ADVERTISED_Autoneg;
5126 }
5127 else {
5128 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005129 if ((cmd->speed != SPEED_1000 &&
5130 cmd->speed != SPEED_2500) ||
5131 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005132 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005133
5134 if (cmd->speed == SPEED_2500 &&
5135 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5136 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005137 }
5138 else if (cmd->speed == SPEED_1000) {
5139 return -EINVAL;
5140 }
5141 autoneg &= ~AUTONEG_SPEED;
5142 req_line_speed = cmd->speed;
5143 req_duplex = cmd->duplex;
5144 advertising = 0;
5145 }
5146
5147 bp->autoneg = autoneg;
5148 bp->advertising = advertising;
5149 bp->req_line_speed = req_line_speed;
5150 bp->req_duplex = req_duplex;
5151
Michael Chanc770a652005-08-25 15:38:39 -07005152 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005153
5154 bnx2_setup_phy(bp);
5155
Michael Chanc770a652005-08-25 15:38:39 -07005156 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005157
5158 return 0;
5159}
5160
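/* Note on bnx2_set_settings() above: with autonegotiation enabled only a
 * single advertised speed (or the full speed mask) is accepted, 10/100
 * settings are rejected on SerDes ports, and 2500baseX requires
 * PHY_2_5G_CAPABLE_FLAG.  In forced mode a SerDes port may only be set
 * to 1000 or 2500 full duplex, and copper may not be forced to 1000.
 * The new settings are applied by bnx2_setup_phy() under phy_lock.
 */
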
5161static void
5162bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5163{
Michael Chan972ec0d2006-01-23 16:12:43 -08005164 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005165
5166 strcpy(info->driver, DRV_MODULE_NAME);
5167 strcpy(info->version, DRV_MODULE_VERSION);
5168 strcpy(info->bus_info, pci_name(bp->pdev));
5169 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5170 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5171 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005172 info->fw_version[1] = info->fw_version[3] = '.';
5173 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005174}
5175
Michael Chan244ac4f2006-03-20 17:48:46 -08005176#define BNX2_REGDUMP_LEN (32 * 1024)
5177
5178static int
5179bnx2_get_regs_len(struct net_device *dev)
5180{
5181 return BNX2_REGDUMP_LEN;
5182}
5183
5184static void
5185bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5186{
5187 u32 *p = _p, i, offset;
5188 u8 *orig_p = _p;
5189 struct bnx2 *bp = netdev_priv(dev);
5190 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5191 0x0800, 0x0880, 0x0c00, 0x0c10,
5192 0x0c30, 0x0d08, 0x1000, 0x101c,
5193 0x1040, 0x1048, 0x1080, 0x10a4,
5194 0x1400, 0x1490, 0x1498, 0x14f0,
5195 0x1500, 0x155c, 0x1580, 0x15dc,
5196 0x1600, 0x1658, 0x1680, 0x16d8,
5197 0x1800, 0x1820, 0x1840, 0x1854,
5198 0x1880, 0x1894, 0x1900, 0x1984,
5199 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5200 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5201 0x2000, 0x2030, 0x23c0, 0x2400,
5202 0x2800, 0x2820, 0x2830, 0x2850,
5203 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5204 0x3c00, 0x3c94, 0x4000, 0x4010,
5205 0x4080, 0x4090, 0x43c0, 0x4458,
5206 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5207 0x4fc0, 0x5010, 0x53c0, 0x5444,
5208 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5209 0x5fc0, 0x6000, 0x6400, 0x6428,
5210 0x6800, 0x6848, 0x684c, 0x6860,
5211 0x6888, 0x6910, 0x8000 };
5212
5213 regs->version = 0;
5214
5215 memset(p, 0, BNX2_REGDUMP_LEN);
5216
5217 if (!netif_running(bp->dev))
5218 return;
5219
5220 i = 0;
5221 offset = reg_boundaries[0];
5222 p += offset;
5223 while (offset < BNX2_REGDUMP_LEN) {
5224 *p++ = REG_RD(bp, offset);
5225 offset += 4;
5226 if (offset == reg_boundaries[i + 1]) {
5227 offset = reg_boundaries[i + 2];
5228 p = (u32 *) (orig_p + offset);
5229 i += 2;
5230 }
5231 }
5232}
5233
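/* Note on bnx2_get_regs() above: reg_boundaries[] holds pairs of
 * (start, end) offsets describing the register ranges that are safe to
 * read.  The dump buffer is pre-zeroed, the loop reads 32-bit words
 * within each range, and when it reaches the end of a range it jumps
 * both the offset and the buffer pointer to the next start, so the holes
 * stay zero and the dump remains BNX2_REGDUMP_LEN bytes.
 */
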
Michael Chanb6016b72005-05-26 13:03:09 -07005234static void
5235bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5236{
Michael Chan972ec0d2006-01-23 16:12:43 -08005237 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005238
5239 if (bp->flags & NO_WOL_FLAG) {
5240 wol->supported = 0;
5241 wol->wolopts = 0;
5242 }
5243 else {
5244 wol->supported = WAKE_MAGIC;
5245 if (bp->wol)
5246 wol->wolopts = WAKE_MAGIC;
5247 else
5248 wol->wolopts = 0;
5249 }
5250 memset(&wol->sopass, 0, sizeof(wol->sopass));
5251}
5252
5253static int
5254bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5255{
Michael Chan972ec0d2006-01-23 16:12:43 -08005256 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005257
5258 if (wol->wolopts & ~WAKE_MAGIC)
5259 return -EINVAL;
5260
5261 if (wol->wolopts & WAKE_MAGIC) {
5262 if (bp->flags & NO_WOL_FLAG)
5263 return -EINVAL;
5264
5265 bp->wol = 1;
5266 }
5267 else {
5268 bp->wol = 0;
5269 }
5270 return 0;
5271}
5272
5273static int
5274bnx2_nway_reset(struct net_device *dev)
5275{
Michael Chan972ec0d2006-01-23 16:12:43 -08005276 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005277 u32 bmcr;
5278
5279 if (!(bp->autoneg & AUTONEG_SPEED)) {
5280 return -EINVAL;
5281 }
5282
Michael Chanc770a652005-08-25 15:38:39 -07005283 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005284
5285 /* Force a link down visible on the other side */
5286 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005287 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005288 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005289
5290 msleep(20);
5291
Michael Chanc770a652005-08-25 15:38:39 -07005292 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005293
5294 bp->current_interval = SERDES_AN_TIMEOUT;
5295 bp->serdes_an_pending = 1;
5296 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005297 }
5298
Michael Chanca58c3a2007-05-03 13:22:52 -07005299 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005300 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005301 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005302
Michael Chanc770a652005-08-25 15:38:39 -07005303 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005304
5305 return 0;
5306}
5307
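/* Note on bnx2_nway_reset() above: on SerDes ports BMCR_LOOPBACK is
 * written first to force the link down so the restart is visible to the
 * link partner, and the periodic timer is re-armed with SERDES_AN_TIMEOUT
 * with serdes_an_pending set.  Autonegotiation is then restarted by
 * writing BMCR_ANRESTART | BMCR_ANENABLE with the loopback bit cleared.
 */
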
5308static int
5309bnx2_get_eeprom_len(struct net_device *dev)
5310{
Michael Chan972ec0d2006-01-23 16:12:43 -08005311 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005312
Michael Chan1122db72006-01-23 16:11:42 -08005313 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005314 return 0;
5315
Michael Chan1122db72006-01-23 16:11:42 -08005316 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005317}
5318
5319static int
5320bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5321 u8 *eebuf)
5322{
Michael Chan972ec0d2006-01-23 16:12:43 -08005323 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005324 int rc;
5325
John W. Linville1064e942005-11-10 12:58:24 -08005326 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005327
5328 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5329
5330 return rc;
5331}
5332
5333static int
5334bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5335 u8 *eebuf)
5336{
Michael Chan972ec0d2006-01-23 16:12:43 -08005337 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005338 int rc;
5339
John W. Linville1064e942005-11-10 12:58:24 -08005340 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005341
5342 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5343
5344 return rc;
5345}
5346
5347static int
5348bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5349{
Michael Chan972ec0d2006-01-23 16:12:43 -08005350 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005351
5352 memset(coal, 0, sizeof(struct ethtool_coalesce));
5353
5354 coal->rx_coalesce_usecs = bp->rx_ticks;
5355 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5356 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5357 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5358
5359 coal->tx_coalesce_usecs = bp->tx_ticks;
5360 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5361 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5362 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5363
5364 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5365
5366 return 0;
5367}
5368
5369static int
5370bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5371{
Michael Chan972ec0d2006-01-23 16:12:43 -08005372 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005373
5374 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5375 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5376
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005377 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005378 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5379
5380 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5381 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5382
5383 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5384 if (bp->rx_quick_cons_trip_int > 0xff)
5385 bp->rx_quick_cons_trip_int = 0xff;
5386
5387 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5388 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5389
5390 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5391 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5392
5393 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5394 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5395
5396 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5397 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5398 0xff;
5399
5400 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5401 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5402 bp->stats_ticks &= 0xffff00;
5403
5404 if (netif_running(bp->dev)) {
5405 bnx2_netif_stop(bp);
5406 bnx2_init_nic(bp);
5407 bnx2_netif_start(bp);
5408 }
5409
5410 return 0;
5411}
5412
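/* Note on bnx2_set_coalesce() above: the requested values are clamped to
 * the widths of the hardware fields -- tick values to 0x3ff microseconds,
 * frame-count triggers to 0xff, and the statistics interval to a multiple
 * of 256 usec no larger than 0xffff00.  If the interface is up, the NIC
 * is stopped and re-initialized so the new coalescing values take effect.
 */
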
5413static void
5414bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5415{
Michael Chan972ec0d2006-01-23 16:12:43 -08005416 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005417
Michael Chan13daffa2006-03-20 17:49:20 -08005418 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005419 ering->rx_mini_max_pending = 0;
5420 ering->rx_jumbo_max_pending = 0;
5421
5422 ering->rx_pending = bp->rx_ring_size;
5423 ering->rx_mini_pending = 0;
5424 ering->rx_jumbo_pending = 0;
5425
5426 ering->tx_max_pending = MAX_TX_DESC_CNT;
5427 ering->tx_pending = bp->tx_ring_size;
5428}
5429
5430static int
5431bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5432{
Michael Chan972ec0d2006-01-23 16:12:43 -08005433 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005434
Michael Chan13daffa2006-03-20 17:49:20 -08005435 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005436 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5437 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5438
5439 return -EINVAL;
5440 }
Michael Chan13daffa2006-03-20 17:49:20 -08005441 if (netif_running(bp->dev)) {
5442 bnx2_netif_stop(bp);
5443 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5444 bnx2_free_skbs(bp);
5445 bnx2_free_mem(bp);
5446 }
5447
5448 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005449 bp->tx_ring_size = ering->tx_pending;
5450
5451 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005452 int rc;
5453
5454 rc = bnx2_alloc_mem(bp);
5455 if (rc)
5456 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005457 bnx2_init_nic(bp);
5458 bnx2_netif_start(bp);
5459 }
5460
5461 return 0;
5462}
5463
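/* Note on bnx2_set_ringparam() above: changing the ring sizes requires a
 * full reinit -- the chip is quiesced and reset, all buffers and
 * descriptor memory are freed, the new sizes are recorded, and memory is
 * re-allocated before the NIC is brought back up.  rx_pending may not
 * exceed MAX_TOTAL_RX_DESC_CNT and tx_pending must be larger than
 * MAX_SKB_FRAGS, presumably so a maximally fragmented skb can still be
 * queued.
 */
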
5464static void
5465bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5466{
Michael Chan972ec0d2006-01-23 16:12:43 -08005467 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005468
5469 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5470 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5471 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5472}
5473
5474static int
5475bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5476{
Michael Chan972ec0d2006-01-23 16:12:43 -08005477 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005478
5479 bp->req_flow_ctrl = 0;
5480 if (epause->rx_pause)
5481 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5482 if (epause->tx_pause)
5483 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5484
5485 if (epause->autoneg) {
5486 bp->autoneg |= AUTONEG_FLOW_CTRL;
5487 }
5488 else {
5489 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5490 }
5491
Michael Chanc770a652005-08-25 15:38:39 -07005492 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005493
5494 bnx2_setup_phy(bp);
5495
Michael Chanc770a652005-08-25 15:38:39 -07005496 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005497
5498 return 0;
5499}
5500
5501static u32
5502bnx2_get_rx_csum(struct net_device *dev)
5503{
Michael Chan972ec0d2006-01-23 16:12:43 -08005504 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005505
5506 return bp->rx_csum;
5507}
5508
5509static int
5510bnx2_set_rx_csum(struct net_device *dev, u32 data)
5511{
Michael Chan972ec0d2006-01-23 16:12:43 -08005512 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005513
5514 bp->rx_csum = data;
5515 return 0;
5516}
5517
Michael Chanb11d6212006-06-29 12:31:21 -07005518static int
5519bnx2_set_tso(struct net_device *dev, u32 data)
5520{
Michael Chan4666f872007-05-03 13:22:28 -07005521 struct bnx2 *bp = netdev_priv(dev);
5522
5523 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005524 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005525 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5526 dev->features |= NETIF_F_TSO6;
5527 } else
5528 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5529 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005530 return 0;
5531}
5532
Michael Chancea94db2006-06-12 22:16:13 -07005533#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005534
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005535static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005536 char string[ETH_GSTRING_LEN];
5537} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5538 { "rx_bytes" },
5539 { "rx_error_bytes" },
5540 { "tx_bytes" },
5541 { "tx_error_bytes" },
5542 { "rx_ucast_packets" },
5543 { "rx_mcast_packets" },
5544 { "rx_bcast_packets" },
5545 { "tx_ucast_packets" },
5546 { "tx_mcast_packets" },
5547 { "tx_bcast_packets" },
5548 { "tx_mac_errors" },
5549 { "tx_carrier_errors" },
5550 { "rx_crc_errors" },
5551 { "rx_align_errors" },
5552 { "tx_single_collisions" },
5553 { "tx_multi_collisions" },
5554 { "tx_deferred" },
5555 { "tx_excess_collisions" },
5556 { "tx_late_collisions" },
5557 { "tx_total_collisions" },
5558 { "rx_fragments" },
5559 { "rx_jabbers" },
5560 { "rx_undersize_packets" },
5561 { "rx_oversize_packets" },
5562 { "rx_64_byte_packets" },
5563 { "rx_65_to_127_byte_packets" },
5564 { "rx_128_to_255_byte_packets" },
5565 { "rx_256_to_511_byte_packets" },
5566 { "rx_512_to_1023_byte_packets" },
5567 { "rx_1024_to_1522_byte_packets" },
5568 { "rx_1523_to_9022_byte_packets" },
5569 { "tx_64_byte_packets" },
5570 { "tx_65_to_127_byte_packets" },
5571 { "tx_128_to_255_byte_packets" },
5572 { "tx_256_to_511_byte_packets" },
5573 { "tx_512_to_1023_byte_packets" },
5574 { "tx_1024_to_1522_byte_packets" },
5575 { "tx_1523_to_9022_byte_packets" },
5576 { "rx_xon_frames" },
5577 { "rx_xoff_frames" },
5578 { "tx_xon_frames" },
5579 { "tx_xoff_frames" },
5580 { "rx_mac_ctrl_frames" },
5581 { "rx_filtered_packets" },
5582 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005583 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005584};
5585
5586#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5587
Arjan van de Venf71e1302006-03-03 21:33:57 -05005588static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005589 STATS_OFFSET32(stat_IfHCInOctets_hi),
5590 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5591 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5592 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5593 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5594 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5595 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5596 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5597 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5598 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5599 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005600 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5601 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5602 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5603 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5604 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5605 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5606 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5607 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5608 STATS_OFFSET32(stat_EtherStatsCollisions),
5609 STATS_OFFSET32(stat_EtherStatsFragments),
5610 STATS_OFFSET32(stat_EtherStatsJabbers),
5611 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5612 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5613 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5614 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5615 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5616 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5617 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5618 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5619 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5620 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5621 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5622 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5623 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5624 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5625 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5626 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5627 STATS_OFFSET32(stat_XonPauseFramesReceived),
5628 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5629 STATS_OFFSET32(stat_OutXonSent),
5630 STATS_OFFSET32(stat_OutXoffSent),
5631 STATS_OFFSET32(stat_MacControlFramesReceived),
5632 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5633 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005634 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005635};
5636
5637/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5638 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005639 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005640static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005641 8,0,8,8,8,8,8,8,8,8,
5642 4,0,4,4,4,4,4,4,4,4,
5643 4,4,4,4,4,4,4,4,4,4,
5644 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005645 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005646};
5647
Michael Chan5b0c76a2005-11-04 08:45:49 -08005648static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5649 8,0,8,8,8,8,8,8,8,8,
5650 4,4,4,4,4,4,4,4,4,4,
5651 4,4,4,4,4,4,4,4,4,4,
5652 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005653 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005654};
5655
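/* Note on the statistics tables above: bnx2_stats_str_arr (ethtool
 * names), bnx2_stats_offset_arr (32-bit word offsets into struct
 * statistics_block) and the per-chip length arrays are parallel tables
 * and must all stay in step with BNX2_NUM_STATS.  A length of 0 makes
 * bnx2_get_ethtool_stats() report the counter as zero, which is how the
 * errata-affected counters are skipped on the older chips.
 */
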
Michael Chanb6016b72005-05-26 13:03:09 -07005656#define BNX2_NUM_TESTS 6
5657
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005658static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005659 char string[ETH_GSTRING_LEN];
5660} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5661 { "register_test (offline)" },
5662 { "memory_test (offline)" },
5663 { "loopback_test (offline)" },
5664 { "nvram_test (online)" },
5665 { "interrupt_test (online)" },
5666 { "link_test (online)" },
5667};
5668
5669static int
5670bnx2_self_test_count(struct net_device *dev)
5671{
5672 return BNX2_NUM_TESTS;
5673}
5674
5675static void
5676bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5677{
Michael Chan972ec0d2006-01-23 16:12:43 -08005678 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005679
5680 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5681 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005682 int i;
5683
Michael Chanb6016b72005-05-26 13:03:09 -07005684 bnx2_netif_stop(bp);
5685 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5686 bnx2_free_skbs(bp);
5687
5688 if (bnx2_test_registers(bp) != 0) {
5689 buf[0] = 1;
5690 etest->flags |= ETH_TEST_FL_FAILED;
5691 }
5692 if (bnx2_test_memory(bp) != 0) {
5693 buf[1] = 1;
5694 etest->flags |= ETH_TEST_FL_FAILED;
5695 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005696 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005697 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005698
5699 if (!netif_running(bp->dev)) {
5700 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5701 }
5702 else {
5703 bnx2_init_nic(bp);
5704 bnx2_netif_start(bp);
5705 }
5706
5707 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005708 for (i = 0; i < 7; i++) {
5709 if (bp->link_up)
5710 break;
5711 msleep_interruptible(1000);
5712 }
Michael Chanb6016b72005-05-26 13:03:09 -07005713 }
5714
5715 if (bnx2_test_nvram(bp) != 0) {
5716 buf[3] = 1;
5717 etest->flags |= ETH_TEST_FL_FAILED;
5718 }
5719 if (bnx2_test_intr(bp) != 0) {
5720 buf[4] = 1;
5721 etest->flags |= ETH_TEST_FL_FAILED;
5722 }
5723
5724 if (bnx2_test_link(bp) != 0) {
5725 buf[5] = 1;
5726 etest->flags |= ETH_TEST_FL_FAILED;
5727
5728 }
5729}
5730
5731static void
5732bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5733{
5734 switch (stringset) {
5735 case ETH_SS_STATS:
5736 memcpy(buf, bnx2_stats_str_arr,
5737 sizeof(bnx2_stats_str_arr));
5738 break;
5739 case ETH_SS_TEST:
5740 memcpy(buf, bnx2_tests_str_arr,
5741 sizeof(bnx2_tests_str_arr));
5742 break;
5743 }
5744}
5745
5746static int
5747bnx2_get_stats_count(struct net_device *dev)
5748{
5749 return BNX2_NUM_STATS;
5750}
5751
5752static void
5753bnx2_get_ethtool_stats(struct net_device *dev,
5754 struct ethtool_stats *stats, u64 *buf)
5755{
Michael Chan972ec0d2006-01-23 16:12:43 -08005756 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005757 int i;
5758 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005759 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005760
5761 if (hw_stats == NULL) {
5762 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5763 return;
5764 }
5765
Michael Chan5b0c76a2005-11-04 08:45:49 -08005766 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5767 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5768 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5769 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005770 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005771 else
5772 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005773
5774 for (i = 0; i < BNX2_NUM_STATS; i++) {
5775 if (stats_len_arr[i] == 0) {
5776 /* skip this counter */
5777 buf[i] = 0;
5778 continue;
5779 }
5780 if (stats_len_arr[i] == 4) {
5781 /* 4-byte counter */
5782 buf[i] = (u64)
5783 *(hw_stats + bnx2_stats_offset_arr[i]);
5784 continue;
5785 }
5786 /* 8-byte counter */
5787 buf[i] = (((u64) *(hw_stats +
5788 bnx2_stats_offset_arr[i])) << 32) +
5789 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5790 }
5791}
5792
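/* Note on bnx2_get_ethtool_stats() above: the per-chip length array
 * selects how each counter is read -- 4-byte counters are copied
 * directly, while 8-byte counters are assembled from two consecutive
 * 32-bit words (the offset points at the _hi word and the _lo word
 * follows it in the statistics block).
 */
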
5793static int
5794bnx2_phys_id(struct net_device *dev, u32 data)
5795{
Michael Chan972ec0d2006-01-23 16:12:43 -08005796 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005797 int i;
5798 u32 save;
5799
5800 if (data == 0)
5801 data = 2;
5802
5803 save = REG_RD(bp, BNX2_MISC_CFG);
5804 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5805
5806 for (i = 0; i < (data * 2); i++) {
5807 if ((i % 2) == 0) {
5808 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5809 }
5810 else {
5811 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5812 BNX2_EMAC_LED_1000MB_OVERRIDE |
5813 BNX2_EMAC_LED_100MB_OVERRIDE |
5814 BNX2_EMAC_LED_10MB_OVERRIDE |
5815 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5816 BNX2_EMAC_LED_TRAFFIC);
5817 }
5818 msleep_interruptible(500);
5819 if (signal_pending(current))
5820 break;
5821 }
5822 REG_WR(bp, BNX2_EMAC_LED, 0);
5823 REG_WR(bp, BNX2_MISC_CFG, save);
5824 return 0;
5825}
5826
Michael Chan4666f872007-05-03 13:22:28 -07005827static int
5828bnx2_set_tx_csum(struct net_device *dev, u32 data)
5829{
5830 struct bnx2 *bp = netdev_priv(dev);
5831
5832 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5833 return (ethtool_op_set_tx_hw_csum(dev, data));
5834 else
5835 return (ethtool_op_set_tx_csum(dev, data));
5836}
5837
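/* Note on bnx2_set_tx_csum() above: the 5709 advertises NETIF_F_HW_CSUM
 * (see bnx2_init_one()), so it toggles checksumming through
 * ethtool_op_set_tx_hw_csum(); the older chips only do IPv4 checksum
 * offload and use ethtool_op_set_tx_csum() instead.
 */
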
Jeff Garzik7282d492006-09-13 14:30:00 -04005838static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005839 .get_settings = bnx2_get_settings,
5840 .set_settings = bnx2_set_settings,
5841 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005842 .get_regs_len = bnx2_get_regs_len,
5843 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005844 .get_wol = bnx2_get_wol,
5845 .set_wol = bnx2_set_wol,
5846 .nway_reset = bnx2_nway_reset,
5847 .get_link = ethtool_op_get_link,
5848 .get_eeprom_len = bnx2_get_eeprom_len,
5849 .get_eeprom = bnx2_get_eeprom,
5850 .set_eeprom = bnx2_set_eeprom,
5851 .get_coalesce = bnx2_get_coalesce,
5852 .set_coalesce = bnx2_set_coalesce,
5853 .get_ringparam = bnx2_get_ringparam,
5854 .set_ringparam = bnx2_set_ringparam,
5855 .get_pauseparam = bnx2_get_pauseparam,
5856 .set_pauseparam = bnx2_set_pauseparam,
5857 .get_rx_csum = bnx2_get_rx_csum,
5858 .set_rx_csum = bnx2_set_rx_csum,
5859 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005860 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005861 .get_sg = ethtool_op_get_sg,
5862 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005863 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005864 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005865 .self_test_count = bnx2_self_test_count,
5866 .self_test = bnx2_self_test,
5867 .get_strings = bnx2_get_strings,
5868 .phys_id = bnx2_phys_id,
5869 .get_stats_count = bnx2_get_stats_count,
5870 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005871 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005872};
5873
5874/* Called with rtnl_lock */
5875static int
5876bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5877{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005878 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005879 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005880 int err;
5881
5882 switch(cmd) {
5883 case SIOCGMIIPHY:
5884 data->phy_id = bp->phy_addr;
5885
5886 /* fallthru */
5887 case SIOCGMIIREG: {
5888 u32 mii_regval;
5889
Michael Chandad3e452007-05-03 13:18:03 -07005890 if (!netif_running(dev))
5891 return -EAGAIN;
5892
Michael Chanc770a652005-08-25 15:38:39 -07005893 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005894 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005895 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005896
5897 data->val_out = mii_regval;
5898
5899 return err;
5900 }
5901
5902 case SIOCSMIIREG:
5903 if (!capable(CAP_NET_ADMIN))
5904 return -EPERM;
5905
Michael Chandad3e452007-05-03 13:18:03 -07005906 if (!netif_running(dev))
5907 return -EAGAIN;
5908
Michael Chanc770a652005-08-25 15:38:39 -07005909 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005910 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005911 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005912
5913 return err;
5914
5915 default:
5916 /* do nothing */
5917 break;
5918 }
5919 return -EOPNOTSUPP;
5920}
5921
5922/* Called with rtnl_lock */
5923static int
5924bnx2_change_mac_addr(struct net_device *dev, void *p)
5925{
5926 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005927 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005928
Michael Chan73eef4c2005-08-25 15:39:15 -07005929 if (!is_valid_ether_addr(addr->sa_data))
5930 return -EINVAL;
5931
Michael Chanb6016b72005-05-26 13:03:09 -07005932 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5933 if (netif_running(dev))
5934 bnx2_set_mac_addr(bp);
5935
5936 return 0;
5937}
5938
5939/* Called with rtnl_lock */
5940static int
5941bnx2_change_mtu(struct net_device *dev, int new_mtu)
5942{
Michael Chan972ec0d2006-01-23 16:12:43 -08005943 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005944
5945 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5946 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5947 return -EINVAL;
5948
5949 dev->mtu = new_mtu;
5950 if (netif_running(dev)) {
5951 bnx2_netif_stop(bp);
5952
5953 bnx2_init_nic(bp);
5954
5955 bnx2_netif_start(bp);
5956 }
5957 return 0;
5958}
5959
5960#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5961static void
5962poll_bnx2(struct net_device *dev)
5963{
Michael Chan972ec0d2006-01-23 16:12:43 -08005964 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005965
5966 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005967 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005968 enable_irq(bp->pdev->irq);
5969}
5970#endif
5971
Michael Chan253c8b72007-01-08 19:56:01 -08005972static void __devinit
5973bnx2_get_5709_media(struct bnx2 *bp)
5974{
5975 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5976 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5977 u32 strap;
5978
5979 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5980 return;
5981 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5982 bp->phy_flags |= PHY_SERDES_FLAG;
5983 return;
5984 }
5985
5986 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5987 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5988 else
5989 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5990
5991 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5992 switch (strap) {
5993 case 0x4:
5994 case 0x5:
5995 case 0x6:
5996 bp->phy_flags |= PHY_SERDES_FLAG;
5997 return;
5998 }
5999 } else {
6000 switch (strap) {
6001 case 0x1:
6002 case 0x2:
6003 case 0x4:
6004 bp->phy_flags |= PHY_SERDES_FLAG;
6005 return;
6006 }
6007 }
6008}
6009
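/* Note on bnx2_get_5709_media() above: the bond ID read from
 * BNX2_MISC_DUAL_MEDIA_CTRL identifies copper ("C") and SerDes ("S")
 * parts directly.  For the dual-media variants the strap value (taken
 * from the software override field when STRAP_OVERRIDE is set, otherwise
 * from the hardware strap field) is decoded with a different
 * strap-to-media mapping for PCI function 0 and for the other function.
 */
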
Michael Chanb6016b72005-05-26 13:03:09 -07006010static int __devinit
6011bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6012{
6013 struct bnx2 *bp;
6014 unsigned long mem_len;
6015 int rc;
6016 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006017 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006018
6019 SET_MODULE_OWNER(dev);
6020 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006021 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006022
6023 bp->flags = 0;
6024 bp->phy_flags = 0;
6025
6026 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6027 rc = pci_enable_device(pdev);
6028 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006029 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006030 goto err_out;
6031 }
6032
6033 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006034 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006035 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006036 rc = -ENODEV;
6037 goto err_out_disable;
6038 }
6039
6040 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6041 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006042 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006043 goto err_out_disable;
6044 }
6045
6046 pci_set_master(pdev);
6047
6048 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6049 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006050 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006051 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006052 rc = -EIO;
6053 goto err_out_release;
6054 }
6055
Michael Chanb6016b72005-05-26 13:03:09 -07006056 bp->dev = dev;
6057 bp->pdev = pdev;
6058
6059 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006060 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006061 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006062
6063 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006064 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006065 dev->mem_end = dev->mem_start + mem_len;
6066 dev->irq = pdev->irq;
6067
6068 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6069
6070 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006071 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006072 rc = -ENOMEM;
6073 goto err_out_release;
6074 }
6075
6076 /* Configure byte swap and enable write to the reg_window registers.
6077 * Rely on the CPU to do target byte swapping on big endian systems;
6078 * the chip's target access swapping will not swap all accesses.
6079 */
6080 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6081 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6082 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6083
Pavel Machek829ca9a2005-09-03 15:56:56 -07006084 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006085
6086 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6087
Michael Chan59b47d82006-11-19 14:10:45 -08006088 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6089 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6090 if (bp->pcix_cap == 0) {
6091 dev_err(&pdev->dev,
6092 "Cannot find PCIX capability, aborting.\n");
6093 rc = -EIO;
6094 goto err_out_unmap;
6095 }
6096 }
6097
Michael Chan40453c82007-05-03 13:19:18 -07006098 /* 5708 cannot support DMA addresses > 40-bit. */
6099 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6100 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6101 else
6102 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6103
6104 /* Configure DMA attributes. */
6105 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6106 dev->features |= NETIF_F_HIGHDMA;
6107 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6108 if (rc) {
6109 dev_err(&pdev->dev,
6110 "pci_set_consistent_dma_mask failed, aborting.\n");
6111 goto err_out_unmap;
6112 }
6113 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6114 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6115 goto err_out_unmap;
6116 }
6117
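 /* Note on the DMA configuration above: the 5708 is limited to 40-bit
  * DMA addressing while the other chips can use a full 64-bit mask.
  * When the larger mask is accepted, NETIF_F_HIGHDMA is advertised and
  * the same mask is applied to consistent allocations (failure there
  * aborts the probe); otherwise the driver falls back to 32-bit DMA and
  * only aborts if even that cannot be set.
  */
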
Michael Chanb6016b72005-05-26 13:03:09 -07006118 /* Get bus information. */
6119 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6120 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6121 u32 clkreg;
6122
6123 bp->flags |= PCIX_FLAG;
6124
6125 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006126
Michael Chanb6016b72005-05-26 13:03:09 -07006127 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6128 switch (clkreg) {
6129 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6130 bp->bus_speed_mhz = 133;
6131 break;
6132
6133 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6134 bp->bus_speed_mhz = 100;
6135 break;
6136
6137 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6138 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6139 bp->bus_speed_mhz = 66;
6140 break;
6141
6142 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6143 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6144 bp->bus_speed_mhz = 50;
6145 break;
6146
6147 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6148 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6149 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6150 bp->bus_speed_mhz = 33;
6151 break;
6152 }
6153 }
6154 else {
6155 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6156 bp->bus_speed_mhz = 66;
6157 else
6158 bp->bus_speed_mhz = 33;
6159 }
6160
6161 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6162 bp->flags |= PCI_32BIT_FLAG;
6163
6164 /* 5706A0 may falsely detect SERR and PERR. */
6165 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6166 reg = REG_RD(bp, PCI_COMMAND);
6167 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6168 REG_WR(bp, PCI_COMMAND, reg);
6169 }
6170 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6171 !(bp->flags & PCIX_FLAG)) {
6172
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006173 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006174 "5706 A1 can only be used in a PCIX bus, aborting.\n");
 rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006175 goto err_out_unmap;
6176 }
6177
6178 bnx2_init_nvram(bp);
6179
Michael Chane3648b32005-11-04 08:51:21 -08006180 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6181
6182 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006183 BNX2_SHM_HDR_SIGNATURE_SIG) {
6184 u32 off = PCI_FUNC(pdev->devfn) << 2;
6185
6186 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6187 } else
Michael Chane3648b32005-11-04 08:51:21 -08006188 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6189
Michael Chanb6016b72005-05-26 13:03:09 -07006190 /* Get the permanent MAC address. First we need to make sure the
6191 * firmware is actually running.
6192 */
Michael Chane3648b32005-11-04 08:51:21 -08006193 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006194
6195 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6196 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006197 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006198 rc = -ENODEV;
6199 goto err_out_unmap;
6200 }
6201
Michael Chane3648b32005-11-04 08:51:21 -08006202 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006203
Michael Chane3648b32005-11-04 08:51:21 -08006204 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006205 bp->mac_addr[0] = (u8) (reg >> 8);
6206 bp->mac_addr[1] = (u8) reg;
6207
Michael Chane3648b32005-11-04 08:51:21 -08006208 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006209 bp->mac_addr[2] = (u8) (reg >> 24);
6210 bp->mac_addr[3] = (u8) (reg >> 16);
6211 bp->mac_addr[4] = (u8) (reg >> 8);
6212 bp->mac_addr[5] = (u8) reg;
6213
6214 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006215 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006216
6217 bp->rx_csum = 1;
6218
6219 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6220
6221 bp->tx_quick_cons_trip_int = 20;
6222 bp->tx_quick_cons_trip = 20;
6223 bp->tx_ticks_int = 80;
6224 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006225
Michael Chanb6016b72005-05-26 13:03:09 -07006226 bp->rx_quick_cons_trip_int = 6;
6227 bp->rx_quick_cons_trip = 6;
6228 bp->rx_ticks_int = 18;
6229 bp->rx_ticks = 18;
6230
6231 bp->stats_ticks = 1000000 & 0xffff00;
6232
6233 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006234 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006235
Michael Chan5b0c76a2005-11-04 08:45:49 -08006236 bp->phy_addr = 1;
6237
Michael Chanb6016b72005-05-26 13:03:09 -07006238 /* Determine the media type; WOL is not supported on SERDES chips. */
Michael Chan253c8b72007-01-08 19:56:01 -08006239 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6240 bnx2_get_5709_media(bp);
6241 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006242 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006243
6244 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006245 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006246 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006247 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006248 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006249 BNX2_SHARED_HW_CFG_CONFIG);
6250 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6251 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6252 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006253 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6254 CHIP_NUM(bp) == CHIP_NUM_5708)
6255 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006256 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6257 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006258
Michael Chan16088272006-06-12 22:16:43 -07006259 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6260 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6261 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006262 bp->flags |= NO_WOL_FLAG;
6263
Michael Chanb6016b72005-05-26 13:03:09 -07006264 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6265 bp->tx_quick_cons_trip_int =
6266 bp->tx_quick_cons_trip;
6267 bp->tx_ticks_int = bp->tx_ticks;
6268 bp->rx_quick_cons_trip_int =
6269 bp->rx_quick_cons_trip;
6270 bp->rx_ticks_int = bp->rx_ticks;
6271 bp->comp_prod_trip_int = bp->comp_prod_trip;
6272 bp->com_ticks_int = bp->com_ticks;
6273 bp->cmd_ticks_int = bp->cmd_ticks;
6274 }
6275
Michael Chanf9317a42006-09-29 17:06:23 -07006276 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6277 *
6278 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6279 * with byte enables disabled on the unused 32-bit word. This is legal
6280 * but causes problems on the AMD 8132 which will eventually stop
6281 * responding after a while.
6282 *
6283 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006284 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006285 */
6286 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6287 struct pci_dev *amd_8132 = NULL;
6288
6289 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6290 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6291 amd_8132))) {
6292 u8 rev;
6293
6294 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6295 if (rev >= 0x10 && rev <= 0x13) {
6296 disable_msi = 1;
6297 pci_dev_put(amd_8132);
6298 break;
6299 }
6300 }
6301 }
6302
Michael Chanb6016b72005-05-26 13:03:09 -07006303 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6304 bp->req_line_speed = 0;
6305 if (bp->phy_flags & PHY_SERDES_FLAG) {
6306 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006307
Michael Chane3648b32005-11-04 08:51:21 -08006308 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006309 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6310 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6311 bp->autoneg = 0;
6312 bp->req_line_speed = bp->line_speed = SPEED_1000;
6313 bp->req_duplex = DUPLEX_FULL;
6314 }
Michael Chanb6016b72005-05-26 13:03:09 -07006315 }
6316 else {
6317 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6318 }
6319
6320 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6321
Michael Chancd339a02005-08-25 15:35:24 -07006322 init_timer(&bp->timer);
6323 bp->timer.expires = RUN_AT(bp->timer_interval);
6324 bp->timer.data = (unsigned long) bp;
6325 bp->timer.function = bnx2_timer;
6326
Michael Chanb6016b72005-05-26 13:03:09 -07006327 return 0;
6328
6329err_out_unmap:
6330 if (bp->regview) {
6331 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006332 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006333 }
6334
6335err_out_release:
6336 pci_release_regions(pdev);
6337
6338err_out_disable:
6339 pci_disable_device(pdev);
6340 pci_set_drvdata(pdev, NULL);
6341
6342err_out:
6343 return rc;
6344}
6345
6346static int __devinit
6347bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6348{
6349 static int version_printed = 0;
6350 struct net_device *dev = NULL;
6351 struct bnx2 *bp;
6352 int rc, i;
6353
6354 if (version_printed++ == 0)
6355 printk(KERN_INFO "%s", version);
6356
6357 /* dev zeroed in init_etherdev */
6358 dev = alloc_etherdev(sizeof(*bp));
6359
6360 if (!dev)
6361 return -ENOMEM;
6362
6363 rc = bnx2_init_board(pdev, dev);
6364 if (rc < 0) {
6365 free_netdev(dev);
6366 return rc;
6367 }
6368
6369 dev->open = bnx2_open;
6370 dev->hard_start_xmit = bnx2_start_xmit;
6371 dev->stop = bnx2_close;
6372 dev->get_stats = bnx2_get_stats;
6373 dev->set_multicast_list = bnx2_set_rx_mode;
6374 dev->do_ioctl = bnx2_ioctl;
6375 dev->set_mac_address = bnx2_change_mac_addr;
6376 dev->change_mtu = bnx2_change_mtu;
6377 dev->tx_timeout = bnx2_tx_timeout;
6378 dev->watchdog_timeo = TX_TIMEOUT;
6379#ifdef BCM_VLAN
6380 dev->vlan_rx_register = bnx2_vlan_rx_register;
6381 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6382#endif
6383 dev->poll = bnx2_poll;
6384 dev->ethtool_ops = &bnx2_ethtool_ops;
6385 dev->weight = 64;
6386
Michael Chan972ec0d2006-01-23 16:12:43 -08006387 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006388
6389#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6390 dev->poll_controller = poll_bnx2;
6391#endif
6392
Michael Chan1b2f9222007-05-03 13:20:19 -07006393 pci_set_drvdata(pdev, dev);
6394
6395 memcpy(dev->dev_addr, bp->mac_addr, 6);
6396 memcpy(dev->perm_addr, bp->mac_addr, 6);
6397 bp->name = board_info[ent->driver_data].name;
6398
Michael Chan4666f872007-05-03 13:22:28 -07006399 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6400 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6401 else
6402 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan1b2f9222007-05-03 13:20:19 -07006403#ifdef BCM_VLAN
6404 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6405#endif
6406 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006407 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6408 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006409
Michael Chanb6016b72005-05-26 13:03:09 -07006410 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006411 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006412 if (bp->regview)
6413 iounmap(bp->regview);
6414 pci_release_regions(pdev);
6415 pci_disable_device(pdev);
6416 pci_set_drvdata(pdev, NULL);
6417 free_netdev(dev);
6418 return rc;
6419 }
6420
Michael Chanb6016b72005-05-26 13:03:09 -07006421 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6422 "IRQ %d, ",
6423 dev->name,
6424 bp->name,
6425 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6426 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6427 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6428 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6429 bp->bus_speed_mhz,
6430 dev->base_addr,
6431 bp->pdev->irq);
6432
6433 printk("node addr ");
6434 for (i = 0; i < 6; i++)
6435 printk("%2.2x", dev->dev_addr[i]);
6436 printk("\n");
6437
Michael Chanb6016b72005-05-26 13:03:09 -07006438 return 0;
6439}
6440
6441static void __devexit
6442bnx2_remove_one(struct pci_dev *pdev)
6443{
6444 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006445 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006446
Michael Chanafdc08b2005-08-25 15:34:29 -07006447 flush_scheduled_work();
6448
Michael Chanb6016b72005-05-26 13:03:09 -07006449 unregister_netdev(dev);
6450
6451 if (bp->regview)
6452 iounmap(bp->regview);
6453
6454 free_netdev(dev);
6455 pci_release_regions(pdev);
6456 pci_disable_device(pdev);
6457 pci_set_drvdata(pdev, NULL);
6458}
6459
6460static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006461bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006462{
6463 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006464 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006465 u32 reset_code;
6466
6467 if (!netif_running(dev))
6468 return 0;
6469
Michael Chan1d60290f2006-03-20 17:50:08 -08006470 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006471 bnx2_netif_stop(bp);
6472 netif_device_detach(dev);
6473 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006474 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006475 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006476 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006477 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6478 else
6479 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6480 bnx2_reset_chip(bp, reset_code);
6481 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006482 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006483 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006484 return 0;
6485}
6486
6487static int
6488bnx2_resume(struct pci_dev *pdev)
6489{
6490 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006491 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006492
6493 if (!netif_running(dev))
6494 return 0;
6495
Michael Chan30c517b2007-05-03 13:20:40 -07006496 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006497 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006498 netif_device_attach(dev);
6499 bnx2_init_nic(bp);
6500 bnx2_netif_start(bp);
6501 return 0;
6502}
6503
6504static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006505 .name = DRV_MODULE_NAME,
6506 .id_table = bnx2_pci_tbl,
6507 .probe = bnx2_init_one,
6508 .remove = __devexit_p(bnx2_remove_one),
6509 .suspend = bnx2_suspend,
6510 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006511};
6512
6513static int __init bnx2_init(void)
6514{
Jeff Garzik29917622006-08-19 17:48:59 -04006515 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006516}
6517
6518static void __exit bnx2_cleanup(void)
6519{
6520 pci_unregister_driver(&bnx2_pci_driver);
6521}
6522
6523module_init(bnx2_init);
6524module_exit(bnx2_cleanup);
6525
6526
6527