Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan72fbaeb2007-05-03 13:25:32 -07003 * Copyright (c) 2004-2007 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chan72fbaeb2007-05-03 13:25:32 -070057#define DRV_MODULE_VERSION "1.5.10"
58#define DRV_MODULE_RELDATE "May 1, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
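/* Usage note: MSI can be turned off at module load time, e.g.
 * "modprobe bnx2 disable_msi=1", in which case the driver falls back to
 * legacy INTx interrupts.
 */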
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080084 BCM5708,
85 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080086 BCM5709,
Michael Chan27a005b2007-05-03 13:23:41 -070087 BCM5709S,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
90/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050091static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070092 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080099 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chan27a005b2007-05-03 13:23:41 -0700102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
Michael Chanb6016b72005-05-26 13:03:09 -0700103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chan27a005b2007-05-03 13:23:41 -0700122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
Michael Chanb6016b72005-05-26 13:03:09 -0700124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202	/* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
Michael Chane89bbf12005-08-25 15:36:58 -0700216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
Michael Chan2f8af122006-08-15 01:39:10 -0700218 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700219
Michael Chan2f8af122006-08-15 01:39:10 -0700220 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800221
222	/* The ring uses 256 indices for 255 entries, one of which
223	 * needs to be skipped.
224 */
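	/* Example: once the 16-bit hardware indices wrap, tx_prod can be
	 * numerically smaller than tx_cons and the unsigned subtraction below
	 * yields a value far larger than TX_DESC_CNT; masking with 0xffff
	 * recovers the true 16-bit distance.  A distance of exactly
	 * TX_DESC_CNT corresponds to a completely full ring (255 real entries
	 * plus the skipped index), so it is clamped to MAX_TX_DESC_CNT before
	 * the free space is computed.
	 */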
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
Michael Chane89bbf12005-08-25 15:36:58 -0700231 return (bp->tx_ring_size - diff);
232}
233
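/* Indirect register access: the target offset is first written to the
 * PCICFG register window address, then the data is read from (or written to)
 * the PCICFG register window.  indirect_lock serializes the two-step sequence
 * so that concurrent callers cannot interleave address/data pairs.
 */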
Michael Chanb6016b72005-05-26 13:03:09 -0700234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
Michael Chan1b8227c2007-05-03 13:24:05 -0700237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
Michael Chan1b8227c2007-05-03 13:24:05 -0700241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
Michael Chanb6016b72005-05-26 13:03:09 -0700244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
Michael Chan1b8227c2007-05-03 13:24:05 -0700249 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
Michael Chan1b8227c2007-05-03 13:24:05 -0700252 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700253}
254
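/* Write one 32-bit word of on-chip context memory.  The 5709 uses a
 * request/acknowledge interface (CTX_CTX_DATA plus CTX_CTX_CTRL, polled until
 * the WRITE_REQ bit clears); older chips use the simpler
 * CTX_DATA_ADR/CTX_DATA register pair.
 */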
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
Michael Chan1b8227c2007-05-03 13:24:05 -0700259 spin_lock_bh(&bp->indirect_lock);
Michael Chan59b47d82006-11-19 14:10:45 -0800260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
Michael Chan1b8227c2007-05-03 13:24:05 -0700277 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700278}
279
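/* MDIO (clause 22) read through the EMAC.  If the chip is auto-polling the
 * PHY, auto-poll is temporarily disabled around the transaction and restored
 * afterwards; the MDIO_COMM register is polled until START_BUSY clears, and
 * -EBUSY is returned if the access times out.
 */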
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400357
Michael Chanb6016b72005-05-26 13:03:09 -0700358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
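/* Re-enable interrupts: the first INT_ACK_CMD write acknowledges the last
 * status block index with the mask bit still set, the second write clears the
 * mask, and COAL_NOW asks the host coalescing block to generate an interrupt
 * immediately so that events which arrived while masked are not missed.
 */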
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
Michael Chanb6016b72005-05-26 13:03:09 -0700397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -0800398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chanb6016b72005-05-26 13:03:09 -0700402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
Michael Chanbf5295b2006-03-23 01:11:56 -0800404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -0700405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
Michael Chan13daffa2006-03-20 17:49:20 -0800441 int i;
442
Michael Chan59b47d82006-11-19 14:10:45 -0800443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
Michael Chanb6016b72005-05-26 13:03:09 -0700451 if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800452 pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800455 bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700472 }
Michael Chan13daffa2006-03-20 17:49:20 -0800473 vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400474 bp->rx_buf_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
Michael Chan0f31f992006-03-23 01:12:38 -0800480 int i, status_blk_size;
Michael Chan13daffa2006-03-20 17:49:20 -0800481
Michael Chan0f31f992006-03-23 01:12:38 -0800482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
Michael Chanb6016b72005-05-26 13:03:09 -0700484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
Michael Chanb6016b72005-05-26 13:03:09 -0700487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
Michael Chan13daffa2006-03-20 17:49:20 -0800494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
Michael Chanb6016b72005-05-26 13:03:09 -0700496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
Michael Chan13daffa2006-03-20 17:49:20 -0800499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
Michael Chanb6016b72005-05-26 13:03:09 -0700511
Michael Chan0f31f992006-03-23 01:12:38 -0800512 /* Combine status and statistics blocks into one allocation. */
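	/* The status block sits at the start of the DMA buffer and the
	 * statistics block follows at the next cache-line-aligned offset, so
	 * one pci_alloc_consistent() call and one mapping cover both.
	 */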
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
Michael Chan0f31f992006-03-23 01:12:38 -0800522 memset(bp->status_blk, 0, bp->status_stats_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700523
Michael Chan0f31f992006-03-23 01:12:38 -0800524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700526
Michael Chan0f31f992006-03-23 01:12:38 -0800527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
Michael Chanb6016b72005-05-26 13:03:09 -0700528
Michael Chan59b47d82006-11-19 14:10:45 -0800529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
Michael Chanb6016b72005-05-26 13:03:09 -0700541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
547
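/* Report the resolved link state (speed, duplex, autoneg result) to the
 * bootcode through the shared-memory LINK_STATUS word so that the management
 * firmware has the same view of the link as the host driver.
 */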
548static void
Michael Chane3648b32005-11-04 08:51:21 -0800549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
553 if (bp->link_up) {
554 u32 bmsr;
555
556 switch (bp->line_speed) {
557 case SPEED_10:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
562 break;
563 case SPEED_100:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
568 break;
569 case SPEED_1000:
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
572 else
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
574 break;
575 case SPEED_2500:
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
578 else
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
580 break;
581 }
582
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
584
585 if (bp->autoneg) {
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
587
Michael Chanca58c3a2007-05-03 13:22:52 -0700588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chane3648b32005-11-04 08:51:21 -0800590
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
594 else
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
596 }
597 }
598 else
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
600
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602}
603
604static void
Michael Chanb6016b72005-05-26 13:03:09 -0700605bnx2_report_link(struct bnx2 *bp)
606{
607 if (bp->link_up) {
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611 printk("%d Mbps ", bp->line_speed);
612
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
615 else
616 printk("half duplex");
617
618 if (bp->flow_ctrl) {
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
623 }
624 else {
625 printk(", transmit ");
626 }
627 printk("flow control ON");
628 }
629 printk("\n");
630 }
631 else {
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634 }
Michael Chane3648b32005-11-04 08:51:21 -0800635
636 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700637}
638
639static void
640bnx2_resolve_flow_ctrl(struct bnx2 *bp)
641{
642 u32 local_adv, remote_adv;
643
644 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
647
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
650 }
651 return;
652 }
653
654 if (bp->duplex != DUPLEX_FULL) {
655 return;
656 }
657
Michael Chan5b0c76a2005-11-04 08:45:49 -0800658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
660 u32 val;
661
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
667 return;
668 }
669
Michael Chanca58c3a2007-05-03 13:22:52 -0700670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700672
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
676
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
685
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
688 }
689
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
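	/* Pause resolution summary (local adv / remote adv -> flow_ctrl):
	 *   PAUSE_CAP          / PAUSE_CAP             -> TX | RX
	 *   PAUSE_CAP + ASYM   / ASYM without CAP      -> RX only
	 *   ASYM without CAP   / PAUSE_CAP + ASYM      -> TX only
	 *   any other combination                      -> no pause
	 */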
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
695 }
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
698 }
699 }
700 else {
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
703 }
704 }
705 }
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
709
710 bp->flow_ctrl = FLOW_CTRL_TX;
711 }
712 }
713}
714
715static int
Michael Chan27a005b2007-05-03 13:23:41 -0700716bnx2_5709s_linkup(struct bnx2 *bp)
717{
718 u32 val, speed;
719
720 bp->link_up = 1;
721
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
729 return 0;
730 }
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732 switch (speed) {
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
735 break;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
738 break;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
742 break;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
745 break;
746 }
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
749 else
750 bp->duplex = DUPLEX_HALF;
751 return 0;
752}
753
754static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800755bnx2_5708s_linkup(struct bnx2 *bp)
756{
757 u32 val;
758
759 bp->link_up = 1;
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
764 break;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
767 break;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
770 break;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
773 break;
774 }
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
777 else
778 bp->duplex = DUPLEX_HALF;
779
780 return 0;
781}
782
783static int
784bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700785{
786 u32 bmcr, local_adv, remote_adv, common;
787
788 bp->link_up = 1;
789 bp->line_speed = SPEED_1000;
790
Michael Chanca58c3a2007-05-03 13:22:52 -0700791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
794 }
795 else {
796 bp->duplex = DUPLEX_HALF;
797 }
798
799 if (!(bmcr & BMCR_ANENABLE)) {
800 return 0;
801 }
802
Michael Chanca58c3a2007-05-03 13:22:52 -0700803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700805
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
811 }
812 else {
813 bp->duplex = DUPLEX_HALF;
814 }
815 }
816
817 return 0;
818}
819
820static int
821bnx2_copper_linkup(struct bnx2 *bp)
822{
823 u32 bmcr;
824
Michael Chanca58c3a2007-05-03 13:22:52 -0700825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
828
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
831
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
836 }
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
840 }
841 else {
Michael Chanca58c3a2007-05-03 13:22:52 -0700842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700844
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
857 }
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
861 }
862 else {
863 bp->line_speed = 0;
864 bp->link_up = 0;
865 }
866 }
867 }
868 else {
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
871 }
872 else {
873 bp->line_speed = SPEED_10;
874 }
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
877 }
878 else {
879 bp->duplex = DUPLEX_HALF;
880 }
881 }
882
883 return 0;
884}
885
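/* Propagate the resolved link parameters to the MAC: select the port mode
 * (MII, MII_10M, GMII or 2.5G), set half/full duplex, use a different
 * EMAC_TX_LENGTHS value for half-duplex gigabit, enable or disable RX/TX
 * pause according to bp->flow_ctrl, and acknowledge the EMAC link-change
 * interrupt.
 */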
886static int
887bnx2_set_mac_link(struct bnx2 *bp)
888{
889 u32 val;
890
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
895 }
896
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
899
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -0800902 BNX2_EMAC_MODE_25G_MODE);
Michael Chanb6016b72005-05-26 13:03:09 -0700903
904 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800905 switch (bp->line_speed) {
906 case SPEED_10:
Michael Chan59b47d82006-11-19 14:10:45 -0800907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800909 break;
910 }
911 /* fall through */
912 case SPEED_100:
913 val |= BNX2_EMAC_MODE_PORT_MII;
914 break;
915 case SPEED_2500:
Michael Chan59b47d82006-11-19 14:10:45 -0800916 val |= BNX2_EMAC_MODE_25G_MODE;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800917 /* fall through */
918 case SPEED_1000:
919 val |= BNX2_EMAC_MODE_PORT_GMII;
920 break;
921 }
Michael Chanb6016b72005-05-26 13:03:09 -0700922 }
923 else {
924 val |= BNX2_EMAC_MODE_PORT_GMII;
925 }
926
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
931
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
934
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
938
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
942
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
946
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
949
950 return 0;
951}
952
Michael Chan27a005b2007-05-03 13:23:41 -0700953static void
954bnx2_enable_bmsr1(struct bnx2 *bp)
955{
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
960}
961
962static void
963bnx2_disable_bmsr1(struct bnx2 *bp)
964{
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969}
970
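/* Advertise 2.5G on SerDes PHYs that support it.  Returns 1 if the UP1
 * register already advertised 2.5G, 0 if the advertisement had to be turned
 * on; callers use the return value to decide whether the link must be forced
 * down so that the partner renegotiates.
 */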
Michael Chanb6016b72005-05-26 13:03:09 -0700971static int
Michael Chan605a9e22007-05-03 13:23:13 -0700972bnx2_test_and_enable_2g5(struct bnx2 *bp)
973{
974 u32 up1;
975 int ret = 1;
976
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978 return 0;
979
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
982
Michael Chan27a005b2007-05-03 13:23:41 -0700983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
Michael Chan605a9e22007-05-03 13:23:13 -0700986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
990 ret = 0;
991 }
992
Michael Chan27a005b2007-05-03 13:23:41 -0700993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
Michael Chan605a9e22007-05-03 13:23:13 -0700997 return ret;
998}
999
1000static int
1001bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002{
1003 u32 up1;
1004 int ret = 0;
1005
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007 return 0;
1008
Michael Chan27a005b2007-05-03 13:23:41 -07001009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
Michael Chan605a9e22007-05-03 13:23:13 -07001012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 1;
1017 }
1018
Michael Chan27a005b2007-05-03 13:23:41 -07001019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
Michael Chan605a9e22007-05-03 13:23:13 -07001023 return ret;
1024}
1025
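/* Force the SerDes to 2.5G operation.  On the 5709 this goes through the
 * SERDES_DIG_MISC1 register in the SERDES_DIG block; on the 5708 a
 * BCM5708S-specific bit in BMCR is used instead.  If speed autoneg was
 * enabled, BMCR_ANENABLE is cleared and the requested duplex is forced.
 */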
1026static void
1027bnx2_enable_forced_2g5(struct bnx2 *bp)
1028{
1029 u32 bmcr;
1030
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032 return;
1033
Michael Chan27a005b2007-05-03 13:23:41 -07001034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035 u32 val;
1036
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051 }
1052
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1057 }
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059}
1060
1061static void
1062bnx2_disable_forced_2g5(struct bnx2 *bp)
1063{
1064 u32 bmcr;
1065
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067 return;
1068
Michael Chan27a005b2007-05-03 13:23:41 -07001069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070 u32 val;
1071
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085 }
1086
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090}
1091
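/* Poll the PHY and update the driver's view of the link.  The status register
 * is read twice because the IEEE BMSR latches link-down events; the second
 * read returns the current state.  On the 5706 SerDes the EMAC link status is
 * used to override the MII indication.
 */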
1092static int
Michael Chanb6016b72005-05-26 13:03:09 -07001093bnx2_set_link(struct bnx2 *bp)
1094{
1095 u32 bmsr;
1096 u8 link_up;
1097
Michael Chan80be4432006-11-19 14:07:28 -08001098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -07001099 bp->link_up = 1;
1100 return 0;
1101 }
1102
1103 link_up = bp->link_up;
1104
Michael Chan27a005b2007-05-03 13:23:41 -07001105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001109
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1112 u32 val;
1113
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1117 else
1118 bmsr &= ~BMSR_LSTATUS;
1119 }
1120
1121 if (bmsr & BMSR_LSTATUS) {
1122 bp->link_up = 1;
1123
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001131 }
1132 else {
1133 bnx2_copper_linkup(bp);
1134 }
1135 bnx2_resolve_flow_ctrl(bp);
1136 }
1137 else {
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
Michael Chan605a9e22007-05-03 13:23:13 -07001139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001141
Michael Chanb6016b72005-05-26 13:03:09 -07001142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1143 bp->link_up = 0;
1144 }
1145
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1148 }
1149
1150 bnx2_set_mac_link(bp);
1151
1152 return 0;
1153}
1154
1155static int
1156bnx2_reset_phy(struct bnx2 *bp)
1157{
1158 int i;
1159 u32 reg;
1160
Michael Chanca58c3a2007-05-03 13:22:52 -07001161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -07001162
1163#define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165 udelay(10);
1166
Michael Chanca58c3a2007-05-03 13:22:52 -07001167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001168 if (!(reg & BMCR_RESET)) {
1169 udelay(20);
1170 break;
1171 }
1172 }
1173 if (i == PHY_RESET_MAX_WAIT) {
1174 return -EBUSY;
1175 }
1176 return 0;
1177}
1178
1179static u32
1180bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181{
1182 u32 adv = 0;
1183
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1189 }
1190 else {
1191 adv = ADVERTISE_PAUSE_CAP;
1192 }
1193 }
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1197 }
1198 else {
1199 adv = ADVERTISE_PAUSE_ASYM;
1200 }
1201 }
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208 }
1209 }
1210 return adv;
1211}
1212
1213static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp)
1215{
Michael Chan605a9e22007-05-03 13:23:13 -07001216 u32 adv, bmcr;
Michael Chanb6016b72005-05-26 13:03:09 -07001217 u32 new_adv = 0;
1218
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001221 int force_link_down = 0;
1222
Michael Chan605a9e22007-05-03 13:23:13 -07001223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1229 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001230 bnx2_read_phy(bp, bp->mii_adv, &adv);
Michael Chan80be4432006-11-19 14:07:28 -08001231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1232
Michael Chanca58c3a2007-05-03 13:22:52 -07001233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001234 new_bmcr = bmcr & ~BMCR_ANENABLE;
Michael Chan80be4432006-11-19 14:07:28 -08001235 new_bmcr |= BMCR_SPEED1000;
Michael Chan605a9e22007-05-03 13:23:13 -07001236
Michael Chan27a005b2007-05-03 13:23:41 -07001237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1243 }
1244
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1248 else
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001250 }
1251
Michael Chanb6016b72005-05-26 13:03:09 -07001252 if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001253 adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001254 new_bmcr |= BMCR_FULLDPLX;
1255 }
1256 else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001257 adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001258 new_bmcr &= ~BMCR_FULLDPLX;
1259 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001260 if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001261 /* Force a link down visible on the other side */
1262 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001263 bnx2_write_phy(bp, bp->mii_adv, adv &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
Michael Chanca58c3a2007-05-03 13:22:52 -07001266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
Michael Chanb6016b72005-05-26 13:03:09 -07001267 BMCR_ANRESTART | BMCR_ANENABLE);
1268
1269 bp->link_up = 0;
1270 netif_carrier_off(bp->dev);
Michael Chanca58c3a2007-05-03 13:22:52 -07001271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan80be4432006-11-19 14:07:28 -08001272 bnx2_report_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001273 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001276 } else {
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001279 }
1280 return 0;
1281 }
1282
Michael Chan605a9e22007-05-03 13:23:13 -07001283 bnx2_test_and_enable_2g5(bp);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001284
Michael Chanb6016b72005-05-26 13:03:09 -07001285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1287
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1289
Michael Chanca58c3a2007-05-03 13:22:52 -07001290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001292
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1296 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chan80be4432006-11-19 14:07:28 -08001298 spin_unlock_bh(&bp->phy_lock);
1299 msleep(20);
1300 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001301 }
1302
Michael Chanca58c3a2007-05-03 13:22:52 -07001303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001305 BMCR_ANENABLE);
Michael Chanf8dd0642006-11-19 14:08:29 -08001306 /* Speed up link-up time when the link partner
1307	 * does not autonegotiate, which is very common
1308 * in blade servers. Some blade servers use
1309	 * IPMI for keyboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1313 */
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan605a9e22007-05-03 13:23:13 -07001317 } else {
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001320 }
1321
1322 return 0;
1323}
1324
1325#define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full)
1327
1328#define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1330 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1331 ADVERTISED_1000baseT_Full)
1332
1333#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1334 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001335
Michael Chanb6016b72005-05-26 13:03:09 -07001336#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337
1338static int
1339bnx2_setup_copper_phy(struct bnx2 *bp)
1340{
1341 u32 bmcr;
1342 u32 new_bmcr;
1343
Michael Chanca58c3a2007-05-03 13:22:52 -07001344 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001345
1346 if (bp->autoneg & AUTONEG_SPEED) {
1347 u32 adv_reg, adv1000_reg;
1348 u32 new_adv_reg = 0;
1349 u32 new_adv1000_reg = 0;
1350
Michael Chanca58c3a2007-05-03 13:22:52 -07001351 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001352 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1353 ADVERTISE_PAUSE_ASYM);
1354
1355 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1356 adv1000_reg &= PHY_ALL_1000_SPEED;
1357
1358 if (bp->advertising & ADVERTISED_10baseT_Half)
1359 new_adv_reg |= ADVERTISE_10HALF;
1360 if (bp->advertising & ADVERTISED_10baseT_Full)
1361 new_adv_reg |= ADVERTISE_10FULL;
1362 if (bp->advertising & ADVERTISED_100baseT_Half)
1363 new_adv_reg |= ADVERTISE_100HALF;
1364 if (bp->advertising & ADVERTISED_100baseT_Full)
1365 new_adv_reg |= ADVERTISE_100FULL;
1366 if (bp->advertising & ADVERTISED_1000baseT_Full)
1367 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001368
Michael Chanb6016b72005-05-26 13:03:09 -07001369 new_adv_reg |= ADVERTISE_CSMA;
1370
1371 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1372
1373 if ((adv1000_reg != new_adv1000_reg) ||
1374 (adv_reg != new_adv_reg) ||
1375 ((bmcr & BMCR_ANENABLE) == 0)) {
1376
Michael Chanca58c3a2007-05-03 13:22:52 -07001377 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001378 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001379 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001380 BMCR_ANENABLE);
1381 }
1382 else if (bp->link_up) {
1383 /* Flow ctrl may have changed from auto to forced */
1384 /* or vice-versa. */
1385
1386 bnx2_resolve_flow_ctrl(bp);
1387 bnx2_set_mac_link(bp);
1388 }
1389 return 0;
1390 }
1391
1392 new_bmcr = 0;
1393 if (bp->req_line_speed == SPEED_100) {
1394 new_bmcr |= BMCR_SPEED100;
1395 }
1396 if (bp->req_duplex == DUPLEX_FULL) {
1397 new_bmcr |= BMCR_FULLDPLX;
1398 }
1399 if (new_bmcr != bmcr) {
1400 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001401
Michael Chanca58c3a2007-05-03 13:22:52 -07001402 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1403 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001404
Michael Chanb6016b72005-05-26 13:03:09 -07001405 if (bmsr & BMSR_LSTATUS) {
1406 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001407 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001408 spin_unlock_bh(&bp->phy_lock);
1409 msleep(50);
1410 spin_lock_bh(&bp->phy_lock);
1411
Michael Chanca58c3a2007-05-03 13:22:52 -07001412 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1413 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001414 }
1415
Michael Chanca58c3a2007-05-03 13:22:52 -07001416 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001417
1418 /* Normally, the new speed is setup after the link has
1419 * gone down and up again. In some cases, link will not go
1420 * down so we need to set up the new speed here.
1421 */
1422 if (bmsr & BMSR_LSTATUS) {
1423 bp->line_speed = bp->req_line_speed;
1424 bp->duplex = bp->req_duplex;
1425 bnx2_resolve_flow_ctrl(bp);
1426 bnx2_set_mac_link(bp);
1427 }
Michael Chan27a005b2007-05-03 13:23:41 -07001428 } else {
1429 bnx2_resolve_flow_ctrl(bp);
1430 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001431 }
1432 return 0;
1433}
1434
1435static int
1436bnx2_setup_phy(struct bnx2 *bp)
1437{
1438 if (bp->loopback == MAC_LOOPBACK)
1439 return 0;
1440
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1443 }
1444 else {
1445 return (bnx2_setup_copper_phy(bp));
1446 }
1447}
1448
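/* One-time setup for the 5709 SerDes PHY.  Its IEEE registers live at an
 * offset of 0x10 inside the combo IEEE block, so the mii_* register shortcuts
 * are remapped first; the AER, SERDES_DIG, OVER1G, BAM next-page and CL73
 * userB0 blocks are then programmed for fiber mode and, when supported, 2.5G
 * advertisement.
 */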
1449static int
Michael Chan27a005b2007-05-03 13:23:41 -07001450bnx2_init_5709s_phy(struct bnx2 *bp)
1451{
1452 u32 val;
1453
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1460
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1463
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1465 bnx2_reset_phy(bp);
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1468
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1473
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1478 else
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1481
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1486
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1488
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1492
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1494
1495 return 0;
1496}
1497
1498static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001499bnx2_init_5708s_phy(struct bnx2 *bp)
1500{
1501 u32 val;
1502
Michael Chan27a005b2007-05-03 13:23:41 -07001503 bnx2_reset_phy(bp);
1504
1505 bp->mii_up1 = BCM5708S_UP1;
1506
Michael Chan5b0c76a2005-11-04 08:45:49 -08001507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1510
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1514
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1518
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1523 }
1524
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1535 }
1536
Michael Chane3648b32005-11-04 08:51:21 -08001537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1539
1540 if (val) {
1541 u32 is_backplane;
1542
Michael Chane3648b32005-11-04 08:51:21 -08001543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1551 }
1552 }
1553 return 0;
1554}
1555
1556static int
1557bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001558{
Michael Chan27a005b2007-05-03 13:23:41 -07001559 bnx2_reset_phy(bp);
1560
Michael Chanb6016b72005-05-26 13:03:09 -07001561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1562
Michael Chan59b47d82006-11-19 14:10:45 -08001563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001565
1566 if (bp->dev->mtu > 1500) {
1567 u32 val;
1568
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1573
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1577 }
1578 else {
1579 u32 val;
1580
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1584
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1588 }
1589
1590 return 0;
1591}
1592
1593static int
1594bnx2_init_copper_phy(struct bnx2 *bp)
1595{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001596 u32 val;
1597
Michael Chan27a005b2007-05-03 13:23:41 -07001598 bnx2_reset_phy(bp);
1599
Michael Chanb6016b72005-05-26 13:03:09 -07001600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1609 }
1610
Michael Chanb659f442007-02-02 00:46:35 -08001611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1615 val &= ~(1 << 8);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1617 }
1618
Michael Chanb6016b72005-05-26 13:03:09 -07001619 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1624
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1627 }
1628 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1632
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1635 }
1636
Michael Chan5b0c76a2005-11-04 08:45:49 -08001637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001641 return 0;
1642}
1643
1644
1645static int
1646bnx2_init_phy(struct bnx2 *bp)
1647{
1648 u32 val;
1649 int rc = 0;
1650
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
Michael Chanca58c3a2007-05-03 13:22:52 -07001654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001656 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1659
Michael Chanb6016b72005-05-26 13:03:09 -07001660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
Michael Chanb6016b72005-05-26 13:03:09 -07001662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1666
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001674 }
1675 else {
1676 rc = bnx2_init_copper_phy(bp);
1677 }
1678
1679 bnx2_setup_phy(bp);
1680
1681 return rc;
1682}
1683
1684static int
1685bnx2_set_mac_loopback(struct bnx2 *bp)
1686{
1687 u32 mac_mode;
1688
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693 bp->link_up = 1;
1694 return 0;
1695}
1696
Michael Chanbc5a0692006-01-23 16:13:22 -08001697static int bnx2_test_link(struct bnx2 *);
1698
1699static int
1700bnx2_set_phy_loopback(struct bnx2 *bp)
1701{
1702 u32 mac_mode;
1703 int rc, i;
1704
1705 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001707 BMCR_SPEED1000);
1708 spin_unlock_bh(&bp->phy_lock);
1709 if (rc)
1710 return rc;
1711
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1714 break;
Michael Chan80be4432006-11-19 14:07:28 -08001715 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001716 }
1717
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001721 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001722
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725 bp->link_up = 1;
1726 return 0;
1727}
1728
Michael Chanb6016b72005-05-26 13:03:09 -07001729static int
Michael Chanb090ae22006-01-23 16:07:10 -08001730bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001731{
1732 int i;
1733 u32 val;
1734
Michael Chanb6016b72005-05-26 13:03:09 -07001735 bp->fw_wr_seq++;
1736 msg_data |= bp->fw_wr_seq;
1737
Michael Chane3648b32005-11-04 08:51:21 -08001738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001739
1740 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1742 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001743
Michael Chane3648b32005-11-04 08:51:21 -08001744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001745
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1747 break;
1748 }
Michael Chanb090ae22006-01-23 16:07:10 -08001749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1750 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001751
1752 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1754 if (!silent)
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1756 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001757
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1760
Michael Chane3648b32005-11-04 08:51:21 -08001761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001762
Michael Chanb6016b72005-05-26 13:03:09 -07001763 return -EBUSY;
1764 }
1765
Michael Chanb090ae22006-01-23 16:07:10 -08001766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1767 return -EIO;
1768
Michael Chanb6016b72005-05-26 13:03:09 -07001769 return 0;
1770}
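/*
 * Driver/firmware mailbox handshake, in outline: each request carries a
 * rolling sequence number in its low bits, is written to BNX2_DRV_MB in
 * shared memory, and is acknowledged by the firmware echoing the sequence
 * into BNX2_FW_MB.  Requests whose data field is WAIT0 always return
 * success; otherwise a timeout is reported back to the firmware with
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT and a missing OK status becomes -EIO.
 */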
1771
Michael Chan59b47d82006-11-19 14:10:45 -08001772static int
1773bnx2_init_5709_context(struct bnx2 *bp)
1774{
1775 int i, ret = 0;
1776 u32 val;
1777
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07001781 for (i = 0; i < 10; i++) {
1782 val = REG_RD(bp, BNX2_CTX_COMMAND);
1783 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
1784 break;
1785 udelay(2);
1786 }
1787 if (val & BNX2_CTX_COMMAND_MEM_INIT)
1788 return -EBUSY;
1789
Michael Chan59b47d82006-11-19 14:10:45 -08001790 for (i = 0; i < bp->ctx_pages; i++) {
1791 int j;
1792
1793 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1794 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1795 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1796 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1797 (u64) bp->ctx_blk_mapping[i] >> 32);
1798 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1799 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1800 for (j = 0; j < 10; j++) {
1801
1802 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1803 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1804 break;
1805 udelay(5);
1806 }
1807 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1808 ret = -EBUSY;
1809 break;
1810 }
1811 }
1812 return ret;
1813}
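/*
 * On the 5709 the connection context lives in host memory: once the
 * block-level MEM_INIT completes, the DMA address of every context page is
 * programmed into the host page table one entry at a time, and each entry
 * write is confirmed by polling for the WRITE_REQ bit to clear.
 */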
1814
Michael Chanb6016b72005-05-26 13:03:09 -07001815static void
1816bnx2_init_context(struct bnx2 *bp)
1817{
1818 u32 vcid;
1819
1820 vcid = 96;
1821 while (vcid) {
1822 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07001823 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07001824
1825 vcid--;
1826
1827 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1828 u32 new_vcid;
1829
1830 vcid_addr = GET_PCID_ADDR(vcid);
1831 if (vcid & 0x8) {
1832 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1833 }
1834 else {
1835 new_vcid = vcid;
1836 }
1837 pcid_addr = GET_PCID_ADDR(new_vcid);
1838 }
1839 else {
1840 vcid_addr = GET_CID_ADDR(vcid);
1841 pcid_addr = vcid_addr;
1842 }
1843
Michael Chan7947b202007-06-04 21:17:10 -07001844 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
1845 vcid_addr += (i << PHY_CTX_SHIFT);
1846 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07001847
Michael Chan7947b202007-06-04 21:17:10 -07001848 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1849 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1850
1851 /* Zero out the context. */
1852 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
1853 CTX_WR(bp, 0x00, offset, 0);
1854
1855 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1856 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
Michael Chanb6016b72005-05-26 13:03:09 -07001857 }
Michael Chanb6016b72005-05-26 13:03:09 -07001858 }
1859}
1860
1861static int
1862bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1863{
1864 u16 *good_mbuf;
1865 u32 good_mbuf_cnt;
1866 u32 val;
1867
1868 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1869 if (good_mbuf == NULL) {
1870 printk(KERN_ERR PFX "Failed to allocate memory in "
1871 "bnx2_alloc_bad_rbuf\n");
1872 return -ENOMEM;
1873 }
1874
1875 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1876 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1877
1878 good_mbuf_cnt = 0;
1879
1880 /* Allocate a bunch of mbufs and save the good ones in an array. */
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1883 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1884
1885 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1886
1887 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1888
1889 /* The addresses with Bit 9 set are bad memory blocks. */
1890 if (!(val & (1 << 9))) {
1891 good_mbuf[good_mbuf_cnt] = (u16) val;
1892 good_mbuf_cnt++;
1893 }
1894
1895 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1896 }
1897
1898 /* Free the good ones back to the mbuf pool thus discarding
1899 * all the bad ones. */
1900 while (good_mbuf_cnt) {
1901 good_mbuf_cnt--;
1902
1903 val = good_mbuf[good_mbuf_cnt];
1904 val = (val << 9) | val | 1;
1905
1906 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1907 }
1908 kfree(good_mbuf);
1909 return 0;
1910}
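/*
 * The loop above quarantines defective rx buffer memory: every mbuf the
 * chip can hand out is allocated once, the ones without the bad-block bit
 * (bit 9) are remembered and freed back to the pool, and the bad ones are
 * simply never returned, so the hardware can no longer allocate them.
 */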
1911
1912static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001913bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001914{
1915 u32 val;
1916 u8 *mac_addr = bp->dev->dev_addr;
1917
1918 val = (mac_addr[0] << 8) | mac_addr[1];
1919
1920 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1921
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001922 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001923 (mac_addr[4] << 8) | mac_addr[5];
1924
1925 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1926}
1927
1928static inline int
1929bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1930{
1931 struct sk_buff *skb;
1932 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1933 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001934 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001935 unsigned long align;
1936
Michael Chan932f3772006-08-15 01:39:36 -07001937 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001938 if (skb == NULL) {
1939 return -ENOMEM;
1940 }
1941
Michael Chan59b47d82006-11-19 14:10:45 -08001942 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1943 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001944
Michael Chanb6016b72005-05-26 13:03:09 -07001945 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1946 PCI_DMA_FROMDEVICE);
1947
1948 rx_buf->skb = skb;
1949 pci_unmap_addr_set(rx_buf, mapping, mapping);
1950
1951 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1952 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1953
1954 bp->rx_prod_bseq += bp->rx_buf_use_size;
1955
1956 return 0;
1957}
1958
Michael Chanda3e4fb2007-05-03 13:24:23 -07001959static int
1960bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1961{
1962 struct status_block *sblk = bp->status_blk;
1963 u32 new_link_state, old_link_state;
1964 int is_set = 1;
1965
1966 new_link_state = sblk->status_attn_bits & event;
1967 old_link_state = sblk->status_attn_bits_ack & event;
1968 if (new_link_state != old_link_state) {
1969 if (new_link_state)
1970 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1971 else
1972 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1973 } else
1974 is_set = 0;
1975
1976 return is_set;
1977}
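/*
 * An attention event is pending when the raw attention bit and its ack bit
 * disagree; writing the event to the STATUS_BIT_SET or _CLEAR command
 * register brings the ack back in line so the event is processed only once.
 */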
1978
Michael Chanb6016b72005-05-26 13:03:09 -07001979static void
1980bnx2_phy_int(struct bnx2 *bp)
1981{
Michael Chanda3e4fb2007-05-03 13:24:23 -07001982 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1983 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001984 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07001985 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001986 }
1987}
1988
1989static void
1990bnx2_tx_int(struct bnx2 *bp)
1991{
Michael Chanf4e418f2005-11-04 08:53:48 -08001992 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07001993 u16 hw_cons, sw_cons, sw_ring_cons;
1994 int tx_free_bd = 0;
1995
Michael Chanf4e418f2005-11-04 08:53:48 -08001996 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07001997 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1998 hw_cons++;
1999 }
2000 sw_cons = bp->tx_cons;
2001
2002 while (sw_cons != hw_cons) {
2003 struct sw_bd *tx_buf;
2004 struct sk_buff *skb;
2005 int i, last;
2006
2007 sw_ring_cons = TX_RING_IDX(sw_cons);
2008
2009 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2010 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002011
Michael Chanb6016b72005-05-26 13:03:09 -07002012 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002013 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002014 u16 last_idx, last_ring_idx;
2015
2016 last_idx = sw_cons +
2017 skb_shinfo(skb)->nr_frags + 1;
2018 last_ring_idx = sw_ring_cons +
2019 skb_shinfo(skb)->nr_frags + 1;
2020 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2021 last_idx++;
2022 }
2023 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2024 break;
2025 }
2026 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002027
Michael Chanb6016b72005-05-26 13:03:09 -07002028 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2029 skb_headlen(skb), PCI_DMA_TODEVICE);
2030
2031 tx_buf->skb = NULL;
2032 last = skb_shinfo(skb)->nr_frags;
2033
2034 for (i = 0; i < last; i++) {
2035 sw_cons = NEXT_TX_BD(sw_cons);
2036
2037 pci_unmap_page(bp->pdev,
2038 pci_unmap_addr(
2039 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2040 mapping),
2041 skb_shinfo(skb)->frags[i].size,
2042 PCI_DMA_TODEVICE);
2043 }
2044
2045 sw_cons = NEXT_TX_BD(sw_cons);
2046
2047 tx_free_bd += last + 1;
2048
Michael Chan745720e2006-06-29 12:37:41 -07002049 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002050
Michael Chanf4e418f2005-11-04 08:53:48 -08002051 hw_cons = bp->hw_tx_cons =
2052 sblk->status_tx_quick_consumer_index0;
2053
Michael Chanb6016b72005-05-26 13:03:09 -07002054 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2055 hw_cons++;
2056 }
2057 }
2058
Michael Chane89bbf12005-08-25 15:36:58 -07002059 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002060 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2061 * before checking for netif_queue_stopped(). Without the
2062 * memory barrier, there is a small possibility that bnx2_start_xmit()
2063 * will miss it and cause the queue to be stopped forever.
2064 */
2065 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002066
Michael Chan2f8af122006-08-15 01:39:10 -07002067 if (unlikely(netif_queue_stopped(bp->dev)) &&
2068 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2069 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002070 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002071 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002072 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002073 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002074 }
Michael Chanb6016b72005-05-26 13:03:09 -07002075}
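/*
 * The smp_mb() above orders the tx_cons update ahead of the
 * netif_queue_stopped() test, and the queue is only re-woken under
 * netif_tx_lock after re-checking the free-descriptor threshold, so a
 * wake-up cannot be lost to a queue stop racing in bnx2_start_xmit().
 */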
2076
2077static inline void
2078bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2079 u16 cons, u16 prod)
2080{
Michael Chan236b6392006-03-20 17:49:02 -08002081 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2082 struct rx_bd *cons_bd, *prod_bd;
2083
2084 cons_rx_buf = &bp->rx_buf_ring[cons];
2085 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002086
2087 pci_dma_sync_single_for_device(bp->pdev,
2088 pci_unmap_addr(cons_rx_buf, mapping),
2089 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2090
Michael Chan236b6392006-03-20 17:49:02 -08002091 bp->rx_prod_bseq += bp->rx_buf_use_size;
2092
2093 prod_rx_buf->skb = skb;
2094
2095 if (cons == prod)
2096 return;
2097
Michael Chanb6016b72005-05-26 13:03:09 -07002098 pci_unmap_addr_set(prod_rx_buf, mapping,
2099 pci_unmap_addr(cons_rx_buf, mapping));
2100
Michael Chan3fdfcc22006-03-20 17:49:49 -08002101 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2102 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002103 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2104 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002105}
2106
2107static int
2108bnx2_rx_int(struct bnx2 *bp, int budget)
2109{
Michael Chanf4e418f2005-11-04 08:53:48 -08002110 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002111 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2112 struct l2_fhdr *rx_hdr;
2113 int rx_pkt = 0;
2114
Michael Chanf4e418f2005-11-04 08:53:48 -08002115 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002116 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2117 hw_cons++;
2118 }
2119 sw_cons = bp->rx_cons;
2120 sw_prod = bp->rx_prod;
2121
2122 /* Memory barrier necessary as speculative reads of the rx
2123 * buffer can be ahead of the index in the status block
2124 */
2125 rmb();
2126 while (sw_cons != hw_cons) {
2127 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002128 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002129 struct sw_bd *rx_buf;
2130 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002131 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002132
2133 sw_ring_cons = RX_RING_IDX(sw_cons);
2134 sw_ring_prod = RX_RING_IDX(sw_prod);
2135
2136 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2137 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002138
2139 rx_buf->skb = NULL;
2140
2141 dma_addr = pci_unmap_addr(rx_buf, mapping);
2142
2143 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002144 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2145
2146 rx_hdr = (struct l2_fhdr *) skb->data;
2147 len = rx_hdr->l2_fhdr_pkt_len - 4;
2148
Michael Chanade2bfe2006-01-23 16:09:51 -08002149 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002150 (L2_FHDR_ERRORS_BAD_CRC |
2151 L2_FHDR_ERRORS_PHY_DECODE |
2152 L2_FHDR_ERRORS_ALIGNMENT |
2153 L2_FHDR_ERRORS_TOO_SHORT |
2154 L2_FHDR_ERRORS_GIANT_FRAME)) {
2155
2156 goto reuse_rx;
2157 }
2158
2159 /* Since we don't have a jumbo ring, copy small packets
2160 * if mtu > 1500
2161 */
2162 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2163 struct sk_buff *new_skb;
2164
Michael Chan932f3772006-08-15 01:39:36 -07002165 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002166 if (new_skb == NULL)
2167 goto reuse_rx;
2168
2169 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002170 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2171 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002172 skb_reserve(new_skb, 2);
2173 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002174
2175 bnx2_reuse_rx_skb(bp, skb,
2176 sw_ring_cons, sw_ring_prod);
2177
2178 skb = new_skb;
2179 }
2180 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002181 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002182 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2183
2184 skb_reserve(skb, bp->rx_offset);
2185 skb_put(skb, len);
2186 }
2187 else {
2188reuse_rx:
2189 bnx2_reuse_rx_skb(bp, skb,
2190 sw_ring_cons, sw_ring_prod);
2191 goto next_rx;
2192 }
2193
2194 skb->protocol = eth_type_trans(skb, bp->dev);
2195
2196 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002197 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002198
Michael Chan745720e2006-06-29 12:37:41 -07002199 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002200 goto next_rx;
2201
2202 }
2203
Michael Chanb6016b72005-05-26 13:03:09 -07002204 skb->ip_summed = CHECKSUM_NONE;
2205 if (bp->rx_csum &&
2206 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2207 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2208
Michael Chanade2bfe2006-01-23 16:09:51 -08002209 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2210 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002211 skb->ip_summed = CHECKSUM_UNNECESSARY;
2212 }
2213
2214#ifdef BCM_VLAN
2215 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2216 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2217 rx_hdr->l2_fhdr_vlan_tag);
2218 }
2219 else
2220#endif
2221 netif_receive_skb(skb);
2222
2223 bp->dev->last_rx = jiffies;
2224 rx_pkt++;
2225
2226next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002227 sw_cons = NEXT_RX_BD(sw_cons);
2228 sw_prod = NEXT_RX_BD(sw_prod);
2229
2230 if ((rx_pkt == budget))
2231 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002232
2233 /* Refresh hw_cons to see if there is new work */
2234 if (sw_cons == hw_cons) {
2235 hw_cons = bp->hw_rx_cons =
2236 sblk->status_rx_quick_consumer_index0;
2237 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2238 hw_cons++;
2239 rmb();
2240 }
Michael Chanb6016b72005-05-26 13:03:09 -07002241 }
2242 bp->rx_cons = sw_cons;
2243 bp->rx_prod = sw_prod;
2244
2245 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2246
2247 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2248
2249 mmiowb();
2250
2251 return rx_pkt;
2252
2253}
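/*
 * Two completion strategies are used above: with a jumbo MTU, frames that
 * fit in RX_COPY_THRESH are copied into a fresh skb so the original buffer
 * can be recycled in place, while larger frames are handed up and replaced
 * by a newly allocated skb.  The final REG_WR16/REG_WR pair publishes the
 * new producer index and byte sequence to the chip.
 */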
2254
2255/* MSI ISR - The only difference between this and the INTx ISR
2256 * is that the MSI interrupt is always serviced.
2257 */
2258static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002259bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002260{
2261 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002262 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002263
Michael Chanc921e4c2005-09-08 13:15:32 -07002264 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002265 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2266 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2267 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2268
2269 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002270 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2271 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002272
Michael Chan73eef4c2005-08-25 15:39:15 -07002273 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002274
Michael Chan73eef4c2005-08-25 15:39:15 -07002275 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002276}
2277
2278static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002279bnx2_msi_1shot(int irq, void *dev_instance)
2280{
2281 struct net_device *dev = dev_instance;
2282 struct bnx2 *bp = netdev_priv(dev);
2283
2284 prefetch(bp->status_blk);
2285
2286 /* Return here if interrupt is disabled. */
2287 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2288 return IRQ_HANDLED;
2289
2290 netif_rx_schedule(dev);
2291
2292 return IRQ_HANDLED;
2293}
2294
2295static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002296bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002297{
2298 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002299 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002300
2301 /* When using INTx, it is possible for the interrupt to arrive
 2302	 * at the CPU before the status block write posted prior to the
 2303	 * interrupt is visible. Reading a register will flush the status block.
2304 * When using MSI, the MSI message will always complete after
2305 * the status block write.
2306 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002307 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002308 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2309 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002310 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002311
2312 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2313 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2314 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2315
2316 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002317 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2318 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002319
Michael Chan73eef4c2005-08-25 15:39:15 -07002320 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002321
Michael Chan73eef4c2005-08-25 15:39:15 -07002322 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002323}
2324
Michael Chanda3e4fb2007-05-03 13:24:23 -07002325#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2326
Michael Chanf4e418f2005-11-04 08:53:48 -08002327static inline int
2328bnx2_has_work(struct bnx2 *bp)
2329{
2330 struct status_block *sblk = bp->status_blk;
2331
2332 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2333 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2334 return 1;
2335
Michael Chanda3e4fb2007-05-03 13:24:23 -07002336 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2337 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002338 return 1;
2339
2340 return 0;
2341}
2342
Michael Chanb6016b72005-05-26 13:03:09 -07002343static int
2344bnx2_poll(struct net_device *dev, int *budget)
2345{
Michael Chan972ec0d2006-01-23 16:12:43 -08002346 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002347 struct status_block *sblk = bp->status_blk;
2348 u32 status_attn_bits = sblk->status_attn_bits;
2349 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002350
Michael Chanda3e4fb2007-05-03 13:24:23 -07002351 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2352 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002353
Michael Chanb6016b72005-05-26 13:03:09 -07002354 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002355
2356 /* This is needed to take care of transient status
2357 * during link changes.
2358 */
2359 REG_WR(bp, BNX2_HC_COMMAND,
2360 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2361 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002362 }
2363
Michael Chanf4e418f2005-11-04 08:53:48 -08002364 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002365 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002366
Michael Chanf4e418f2005-11-04 08:53:48 -08002367 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002368 int orig_budget = *budget;
2369 int work_done;
2370
2371 if (orig_budget > dev->quota)
2372 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002373
Michael Chanb6016b72005-05-26 13:03:09 -07002374 work_done = bnx2_rx_int(bp, orig_budget);
2375 *budget -= work_done;
2376 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002377 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002378
Michael Chanf4e418f2005-11-04 08:53:48 -08002379 bp->last_status_idx = bp->status_blk->status_idx;
2380 rmb();
2381
2382 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002383 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002384 if (likely(bp->flags & USING_MSI_FLAG)) {
2385 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2386 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2387 bp->last_status_idx);
2388 return 0;
2389 }
Michael Chanb6016b72005-05-26 13:03:09 -07002390 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002391 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2392 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2393 bp->last_status_idx);
2394
2395 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2396 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2397 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002398 return 0;
2399 }
2400
2401 return 1;
2402}
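/*
 * bnx2_poll() follows the old *budget NAPI convention: it returns 1 while
 * work remains and 0 once the rings are drained, at which point interrupts
 * are re-enabled by acking the last seen status index (the INTx path writes
 * the ack twice, first with the mask bit still set).
 */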
2403
Herbert Xu932ff272006-06-09 12:20:56 -07002404/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002405 * from set_multicast.
2406 */
2407static void
2408bnx2_set_rx_mode(struct net_device *dev)
2409{
Michael Chan972ec0d2006-01-23 16:12:43 -08002410 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002411 u32 rx_mode, sort_mode;
2412 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002413
Michael Chanc770a652005-08-25 15:38:39 -07002414 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002415
2416 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2417 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2418 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2419#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002420 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002421 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002422#else
Michael Chane29054f2006-01-23 16:06:06 -08002423 if (!(bp->flags & ASF_ENABLE_FLAG))
2424 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002425#endif
2426 if (dev->flags & IFF_PROMISC) {
2427 /* Promiscuous mode. */
2428 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002429 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2430 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002431 }
2432 else if (dev->flags & IFF_ALLMULTI) {
2433 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2434 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2435 0xffffffff);
2436 }
2437 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2438 }
2439 else {
2440 /* Accept one or more multicast(s). */
2441 struct dev_mc_list *mclist;
2442 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2443 u32 regidx;
2444 u32 bit;
2445 u32 crc;
2446
2447 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2448
2449 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2450 i++, mclist = mclist->next) {
2451
2452 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2453 bit = crc & 0xff;
2454 regidx = (bit & 0xe0) >> 5;
2455 bit &= 0x1f;
2456 mc_filter[regidx] |= (1 << bit);
2457 }
2458
2459 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2460 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2461 mc_filter[i]);
2462 }
2463
2464 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2465 }
2466
2467 if (rx_mode != bp->rx_mode) {
2468 bp->rx_mode = rx_mode;
2469 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2470 }
2471
2472 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2473 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2474 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2475
Michael Chanc770a652005-08-25 15:38:39 -07002476 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002477}
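/*
 * The multicast filter above spreads addresses over 256 hash bins: the low
 * byte of the little-endian CRC selects the bin, the top three bits of that
 * byte select one of the NUM_MC_HASH_REGISTERS 32-bit registers and the low
 * five bits select the bit within it.  A minimal standalone sketch of the
 * same mapping (illustrative only; the driver computes it inline above):
 */
static inline void bnx2_mc_hash_pos(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bin = crc & 0xff;		/* one of 256 hash bins */

	*regidx = (bin & 0xe0) >> 5;	/* which 32-bit hash register */
	*bitpos = bin & 0x1f;		/* which bit inside that register */
}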
2478
Michael Chanfba9fe92006-06-12 22:21:25 -07002479#define FW_BUF_SIZE 0x8000
2480
2481static int
2482bnx2_gunzip_init(struct bnx2 *bp)
2483{
2484 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2485 goto gunzip_nomem1;
2486
2487 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2488 goto gunzip_nomem2;
2489
2490 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2491 if (bp->strm->workspace == NULL)
2492 goto gunzip_nomem3;
2493
2494 return 0;
2495
2496gunzip_nomem3:
2497 kfree(bp->strm);
2498 bp->strm = NULL;
2499
2500gunzip_nomem2:
2501 vfree(bp->gunzip_buf);
2502 bp->gunzip_buf = NULL;
2503
2504gunzip_nomem1:
2505 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
 2506			"decompression.\n", bp->dev->name);
2507 return -ENOMEM;
2508}
2509
2510static void
2511bnx2_gunzip_end(struct bnx2 *bp)
2512{
2513 kfree(bp->strm->workspace);
2514
2515 kfree(bp->strm);
2516 bp->strm = NULL;
2517
2518 if (bp->gunzip_buf) {
2519 vfree(bp->gunzip_buf);
2520 bp->gunzip_buf = NULL;
2521 }
2522}
2523
2524static int
2525bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2526{
2527 int n, rc;
2528
2529 /* check gzip header */
2530 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2531 return -EINVAL;
2532
2533 n = 10;
2534
2535#define FNAME 0x8
2536 if (zbuf[3] & FNAME)
2537 while ((zbuf[n++] != 0) && (n < len));
2538
2539 bp->strm->next_in = zbuf + n;
2540 bp->strm->avail_in = len - n;
2541 bp->strm->next_out = bp->gunzip_buf;
2542 bp->strm->avail_out = FW_BUF_SIZE;
2543
2544 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2545 if (rc != Z_OK)
2546 return rc;
2547
2548 rc = zlib_inflate(bp->strm, Z_FINISH);
2549
2550 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2551 *outbuf = bp->gunzip_buf;
2552
2553 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2554 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2555 bp->dev->name, bp->strm->msg);
2556
2557 zlib_inflateEnd(bp->strm);
2558
2559 if (rc == Z_STREAM_END)
2560 return 0;
2561
2562 return rc;
2563}
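/*
 * bnx2_gunzip() above skips the fixed 10-byte gzip header (plus the
 * NUL-terminated file name when the FNAME flag is set) and then runs a raw
 * inflate with negative windowBits.  A minimal sketch of the header skip,
 * with the bounds check ahead of the dereference (illustrative helper, not
 * used by the driver):
 */
static int bnx2_gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header length */

	if (zbuf[3] & FNAME)		/* optional original file name */
		while (n < len && zbuf[n++] != 0)
			;
	return n;
}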
2564
Michael Chanb6016b72005-05-26 13:03:09 -07002565static void
2566load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2567 u32 rv2p_proc)
2568{
2569 int i;
2570 u32 val;
2571
2572
2573 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002574 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002575 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002576 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002577 rv2p_code++;
2578
2579 if (rv2p_proc == RV2P_PROC1) {
2580 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2581 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2582 }
2583 else {
2584 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2585 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2586 }
2587 }
2588
2589 /* Reset the processor, un-stall is done later. */
2590 if (rv2p_proc == RV2P_PROC1) {
2591 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2592 }
2593 else {
2594 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2595 }
2596}
2597
Michael Chanaf3ee512006-11-19 14:09:25 -08002598static int
Michael Chanb6016b72005-05-26 13:03:09 -07002599load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2600{
2601 u32 offset;
2602 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002603 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002604
2605 /* Halt the CPU. */
2606 val = REG_RD_IND(bp, cpu_reg->mode);
2607 val |= cpu_reg->mode_value_halt;
2608 REG_WR_IND(bp, cpu_reg->mode, val);
2609 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2610
2611 /* Load the Text area. */
2612 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002613 if (fw->gz_text) {
2614 u32 text_len;
2615 void *text;
2616
2617 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2618 &text_len);
2619 if (rc)
2620 return rc;
2621
2622 fw->text = text;
2623 }
2624 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002625 int j;
2626
2627 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002628 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002629 }
2630 }
2631
2632 /* Load the Data area. */
2633 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2634 if (fw->data) {
2635 int j;
2636
2637 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2638 REG_WR_IND(bp, offset, fw->data[j]);
2639 }
2640 }
2641
2642 /* Load the SBSS area. */
2643 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2644 if (fw->sbss) {
2645 int j;
2646
2647 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2648 REG_WR_IND(bp, offset, fw->sbss[j]);
2649 }
2650 }
2651
2652 /* Load the BSS area. */
2653 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2654 if (fw->bss) {
2655 int j;
2656
2657 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2658 REG_WR_IND(bp, offset, fw->bss[j]);
2659 }
2660 }
2661
2662 /* Load the Read-Only area. */
2663 offset = cpu_reg->spad_base +
2664 (fw->rodata_addr - cpu_reg->mips_view_base);
2665 if (fw->rodata) {
2666 int j;
2667
2668 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2669 REG_WR_IND(bp, offset, fw->rodata[j]);
2670 }
2671 }
2672
2673 /* Clear the pre-fetch instruction. */
2674 REG_WR_IND(bp, cpu_reg->inst, 0);
2675 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2676
2677 /* Start the CPU. */
2678 val = REG_RD_IND(bp, cpu_reg->mode);
2679 val &= ~cpu_reg->mode_value_halt;
2680 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2681 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002682
2683 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002684}
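/*
 * Loading an on-chip CPU always follows the same sequence: halt it through
 * its mode register, copy the (gzip-compressed) text and the data, sbss,
 * bss and read-only sections into its scratchpad window, clear the prefetch
 * instruction, point the program counter at the start address and un-halt.
 */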
2685
Michael Chanfba9fe92006-06-12 22:21:25 -07002686static int
Michael Chanb6016b72005-05-26 13:03:09 -07002687bnx2_init_cpus(struct bnx2 *bp)
2688{
2689 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002690 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002691 int rc = 0;
2692 void *text;
2693 u32 text_len;
2694
2695 if ((rc = bnx2_gunzip_init(bp)) != 0)
2696 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002697
2698 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002699 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2700 &text_len);
2701 if (rc)
2702 goto init_cpu_err;
2703
2704 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2705
2706 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2707 &text_len);
2708 if (rc)
2709 goto init_cpu_err;
2710
2711 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002712
2713 /* Initialize the RX Processor. */
2714 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2715 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2716 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2717 cpu_reg.state = BNX2_RXP_CPU_STATE;
2718 cpu_reg.state_value_clear = 0xffffff;
2719 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2720 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2721 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2722 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2723 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2724 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2725 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002726
Michael Chand43584c2006-11-19 14:14:35 -08002727 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2728 fw = &bnx2_rxp_fw_09;
2729 else
2730 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002731
Michael Chanaf3ee512006-11-19 14:09:25 -08002732 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002733 if (rc)
2734 goto init_cpu_err;
2735
Michael Chanb6016b72005-05-26 13:03:09 -07002736 /* Initialize the TX Processor. */
2737 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2738 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2739 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2740 cpu_reg.state = BNX2_TXP_CPU_STATE;
2741 cpu_reg.state_value_clear = 0xffffff;
2742 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2743 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2744 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2745 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2746 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2747 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2748 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002749
Michael Chand43584c2006-11-19 14:14:35 -08002750 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2751 fw = &bnx2_txp_fw_09;
2752 else
2753 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002754
Michael Chanaf3ee512006-11-19 14:09:25 -08002755 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002756 if (rc)
2757 goto init_cpu_err;
2758
Michael Chanb6016b72005-05-26 13:03:09 -07002759 /* Initialize the TX Patch-up Processor. */
2760 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2761 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2762 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2763 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2764 cpu_reg.state_value_clear = 0xffffff;
2765 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2766 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2767 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2768 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2769 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2770 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2771 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002772
Michael Chand43584c2006-11-19 14:14:35 -08002773 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2774 fw = &bnx2_tpat_fw_09;
2775 else
2776 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002777
Michael Chanaf3ee512006-11-19 14:09:25 -08002778 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002779 if (rc)
2780 goto init_cpu_err;
2781
Michael Chanb6016b72005-05-26 13:03:09 -07002782 /* Initialize the Completion Processor. */
2783 cpu_reg.mode = BNX2_COM_CPU_MODE;
2784 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2785 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2786 cpu_reg.state = BNX2_COM_CPU_STATE;
2787 cpu_reg.state_value_clear = 0xffffff;
2788 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2789 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2790 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2791 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2792 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2793 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2794 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002795
Michael Chand43584c2006-11-19 14:14:35 -08002796 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2797 fw = &bnx2_com_fw_09;
2798 else
2799 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002800
Michael Chanaf3ee512006-11-19 14:09:25 -08002801 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002802 if (rc)
2803 goto init_cpu_err;
2804
Michael Chand43584c2006-11-19 14:14:35 -08002805 /* Initialize the Command Processor. */
2806 cpu_reg.mode = BNX2_CP_CPU_MODE;
2807 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2808 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2809 cpu_reg.state = BNX2_CP_CPU_STATE;
2810 cpu_reg.state_value_clear = 0xffffff;
2811 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2812 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2813 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2814 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2815 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2816 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2817 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002818
Michael Chand43584c2006-11-19 14:14:35 -08002819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2820 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002821
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002822 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002823 if (rc)
2824 goto init_cpu_err;
2825 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002826init_cpu_err:
2827 bnx2_gunzip_end(bp);
2828 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002829}
2830
2831static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002832bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002833{
2834 u16 pmcsr;
2835
2836 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2837
2838 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002839 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002840 u32 val;
2841
2842 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2843 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2844 PCI_PM_CTRL_PME_STATUS);
2845
2846 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2847 /* delay required during transition out of D3hot */
2848 msleep(20);
2849
2850 val = REG_RD(bp, BNX2_EMAC_MODE);
2851 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2852 val &= ~BNX2_EMAC_MODE_MPKT;
2853 REG_WR(bp, BNX2_EMAC_MODE, val);
2854
2855 val = REG_RD(bp, BNX2_RPM_CONFIG);
2856 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2857 REG_WR(bp, BNX2_RPM_CONFIG, val);
2858 break;
2859 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002860 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002861 int i;
2862 u32 val, wol_msg;
2863
2864 if (bp->wol) {
2865 u32 advertising;
2866 u8 autoneg;
2867
2868 autoneg = bp->autoneg;
2869 advertising = bp->advertising;
2870
2871 bp->autoneg = AUTONEG_SPEED;
2872 bp->advertising = ADVERTISED_10baseT_Half |
2873 ADVERTISED_10baseT_Full |
2874 ADVERTISED_100baseT_Half |
2875 ADVERTISED_100baseT_Full |
2876 ADVERTISED_Autoneg;
2877
2878 bnx2_setup_copper_phy(bp);
2879
2880 bp->autoneg = autoneg;
2881 bp->advertising = advertising;
2882
2883 bnx2_set_mac_addr(bp);
2884
2885 val = REG_RD(bp, BNX2_EMAC_MODE);
2886
2887 /* Enable port mode. */
2888 val &= ~BNX2_EMAC_MODE_PORT;
2889 val |= BNX2_EMAC_MODE_PORT_MII |
2890 BNX2_EMAC_MODE_MPKT_RCVD |
2891 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002892 BNX2_EMAC_MODE_MPKT;
2893
2894 REG_WR(bp, BNX2_EMAC_MODE, val);
2895
2896 /* receive all multicast */
2897 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2898 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2899 0xffffffff);
2900 }
2901 REG_WR(bp, BNX2_EMAC_RX_MODE,
2902 BNX2_EMAC_RX_MODE_SORT_MODE);
2903
2904 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2905 BNX2_RPM_SORT_USER0_MC_EN;
2906 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2907 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2908 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2909 BNX2_RPM_SORT_USER0_ENA);
2910
2911 /* Need to enable EMAC and RPM for WOL. */
2912 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2913 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2914 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2915 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2916
2917 val = REG_RD(bp, BNX2_RPM_CONFIG);
2918 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2919 REG_WR(bp, BNX2_RPM_CONFIG, val);
2920
2921 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2922 }
2923 else {
2924 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2925 }
2926
Michael Chandda1e392006-01-23 16:08:14 -08002927 if (!(bp->flags & NO_WOL_FLAG))
2928 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002929
2930 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2931 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2932 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2933
2934 if (bp->wol)
2935 pmcsr |= 3;
2936 }
2937 else {
2938 pmcsr |= 3;
2939 }
2940 if (bp->wol) {
2941 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2942 }
2943 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2944 pmcsr);
2945
2946 /* No more memory access after this point until
2947 * device is brought back to D0.
2948 */
2949 udelay(50);
2950 break;
2951 }
2952 default:
2953 return -EINVAL;
2954 }
2955 return 0;
2956}
2957
2958static int
2959bnx2_acquire_nvram_lock(struct bnx2 *bp)
2960{
2961 u32 val;
2962 int j;
2963
2964 /* Request access to the flash interface. */
2965 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2966 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2967 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2968 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2969 break;
2970
2971 udelay(5);
2972 }
2973
2974 if (j >= NVRAM_TIMEOUT_COUNT)
2975 return -EBUSY;
2976
2977 return 0;
2978}
2979
2980static int
2981bnx2_release_nvram_lock(struct bnx2 *bp)
2982{
2983 int j;
2984 u32 val;
2985
2986 /* Relinquish nvram interface. */
2987 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2988
2989 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2990 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2991 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2992 break;
2993
2994 udelay(5);
2995 }
2996
2997 if (j >= NVRAM_TIMEOUT_COUNT)
2998 return -EBUSY;
2999
3000 return 0;
3001}
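/*
 * Flash access is arbitrated in hardware: the driver requests arbitration
 * slot 2 through BNX2_NVM_SW_ARB, polls up to NVRAM_TIMEOUT_COUNT times for
 * the grant bit, and releases the slot the same way when it is done.
 */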
3002
3003
3004static int
3005bnx2_enable_nvram_write(struct bnx2 *bp)
3006{
3007 u32 val;
3008
3009 val = REG_RD(bp, BNX2_MISC_CFG);
3010 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3011
3012 if (!bp->flash_info->buffered) {
3013 int j;
3014
3015 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3016 REG_WR(bp, BNX2_NVM_COMMAND,
3017 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3018
3019 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3020 udelay(5);
3021
3022 val = REG_RD(bp, BNX2_NVM_COMMAND);
3023 if (val & BNX2_NVM_COMMAND_DONE)
3024 break;
3025 }
3026
3027 if (j >= NVRAM_TIMEOUT_COUNT)
3028 return -EBUSY;
3029 }
3030 return 0;
3031}
3032
3033static void
3034bnx2_disable_nvram_write(struct bnx2 *bp)
3035{
3036 u32 val;
3037
3038 val = REG_RD(bp, BNX2_MISC_CFG);
3039 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3040}
3041
3042
3043static void
3044bnx2_enable_nvram_access(struct bnx2 *bp)
3045{
3046 u32 val;
3047
3048 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3049 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003050 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003051 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3052}
3053
3054static void
3055bnx2_disable_nvram_access(struct bnx2 *bp)
3056{
3057 u32 val;
3058
3059 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3060 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003061 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003062 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3063 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3064}
3065
3066static int
3067bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3068{
3069 u32 cmd;
3070 int j;
3071
3072 if (bp->flash_info->buffered)
3073 /* Buffered flash, no erase needed */
3074 return 0;
3075
3076 /* Build an erase command */
3077 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3078 BNX2_NVM_COMMAND_DOIT;
3079
3080 /* Need to clear DONE bit separately. */
3081 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3082
3083 /* Address of the NVRAM to read from. */
3084 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3085
3086 /* Issue an erase command. */
3087 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3088
3089 /* Wait for completion. */
3090 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3091 u32 val;
3092
3093 udelay(5);
3094
3095 val = REG_RD(bp, BNX2_NVM_COMMAND);
3096 if (val & BNX2_NVM_COMMAND_DONE)
3097 break;
3098 }
3099
3100 if (j >= NVRAM_TIMEOUT_COUNT)
3101 return -EBUSY;
3102
3103 return 0;
3104}
3105
3106static int
3107bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3108{
3109 u32 cmd;
3110 int j;
3111
3112 /* Build the command word. */
3113 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3114
3115 /* Calculate an offset of a buffered flash. */
3116 if (bp->flash_info->buffered) {
3117 offset = ((offset / bp->flash_info->page_size) <<
3118 bp->flash_info->page_bits) +
3119 (offset % bp->flash_info->page_size);
3120 }
3121
3122 /* Need to clear DONE bit separately. */
3123 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3124
3125 /* Address of the NVRAM to read from. */
3126 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3127
3128 /* Issue a read command. */
3129 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3130
3131 /* Wait for completion. */
3132 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3133 u32 val;
3134
3135 udelay(5);
3136
3137 val = REG_RD(bp, BNX2_NVM_COMMAND);
3138 if (val & BNX2_NVM_COMMAND_DONE) {
3139 val = REG_RD(bp, BNX2_NVM_READ);
3140
3141 val = be32_to_cpu(val);
3142 memcpy(ret_val, &val, 4);
3143 break;
3144 }
3145 }
3146 if (j >= NVRAM_TIMEOUT_COUNT)
3147 return -EBUSY;
3148
3149 return 0;
3150}
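/*
 * For buffered flash parts the linear NVRAM offset is translated into a
 * page number shifted by page_bits plus the offset within the page before
 * it is written to BNX2_NVM_ADDR; the read and write dword routines both do
 * this inline.  The same translation as a standalone helper (illustrative
 * only):
 */
static u32 bnx2_buffered_flash_addr(u32 offset, u32 page_size, u32 page_bits)
{
	return ((offset / page_size) << page_bits) + (offset % page_size);
}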
3151
3152
3153static int
3154bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3155{
3156 u32 cmd, val32;
3157 int j;
3158
3159 /* Build the command word. */
3160 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3161
3162 /* Calculate an offset of a buffered flash. */
3163 if (bp->flash_info->buffered) {
3164 offset = ((offset / bp->flash_info->page_size) <<
3165 bp->flash_info->page_bits) +
3166 (offset % bp->flash_info->page_size);
3167 }
3168
3169 /* Need to clear DONE bit separately. */
3170 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3171
3172 memcpy(&val32, val, 4);
3173 val32 = cpu_to_be32(val32);
3174
3175 /* Write the data. */
3176 REG_WR(bp, BNX2_NVM_WRITE, val32);
3177
3178 /* Address of the NVRAM to write to. */
3179 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3180
3181 /* Issue the write command. */
3182 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3183
3184 /* Wait for completion. */
3185 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3186 udelay(5);
3187
3188 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3189 break;
3190 }
3191 if (j >= NVRAM_TIMEOUT_COUNT)
3192 return -EBUSY;
3193
3194 return 0;
3195}
3196
3197static int
3198bnx2_init_nvram(struct bnx2 *bp)
3199{
3200 u32 val;
3201 int j, entry_count, rc;
3202 struct flash_spec *flash;
3203
3204 /* Determine the selected interface. */
3205 val = REG_RD(bp, BNX2_NVM_CFG1);
3206
3207 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3208
3209 rc = 0;
3210 if (val & 0x40000000) {
3211
3212 /* Flash interface has been reconfigured */
3213 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003214 j++, flash++) {
3215 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3216 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003217 bp->flash_info = flash;
3218 break;
3219 }
3220 }
3221 }
3222 else {
Michael Chan37137702005-11-04 08:49:17 -08003223 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003224 /* Not yet been reconfigured */
3225
Michael Chan37137702005-11-04 08:49:17 -08003226 if (val & (1 << 23))
3227 mask = FLASH_BACKUP_STRAP_MASK;
3228 else
3229 mask = FLASH_STRAP_MASK;
3230
Michael Chanb6016b72005-05-26 13:03:09 -07003231 for (j = 0, flash = &flash_table[0]; j < entry_count;
3232 j++, flash++) {
3233
Michael Chan37137702005-11-04 08:49:17 -08003234 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003235 bp->flash_info = flash;
3236
3237 /* Request access to the flash interface. */
3238 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3239 return rc;
3240
3241 /* Enable access to flash interface */
3242 bnx2_enable_nvram_access(bp);
3243
3244 /* Reconfigure the flash interface */
3245 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3246 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3247 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3248 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3249
3250 /* Disable access to flash interface */
3251 bnx2_disable_nvram_access(bp);
3252 bnx2_release_nvram_lock(bp);
3253
3254 break;
3255 }
3256 }
3257 } /* if (val & 0x40000000) */
3258
3259 if (j == entry_count) {
3260 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003261 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003262 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003263 }
3264
Michael Chan1122db72006-01-23 16:11:42 -08003265 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3266 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3267 if (val)
3268 bp->flash_size = val;
3269 else
3270 bp->flash_size = bp->flash_info->total_size;
3271
Michael Chanb6016b72005-05-26 13:03:09 -07003272 return rc;
3273}
3274
3275static int
3276bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3277 int buf_size)
3278{
3279 int rc = 0;
3280 u32 cmd_flags, offset32, len32, extra;
3281
3282 if (buf_size == 0)
3283 return 0;
3284
3285 /* Request access to the flash interface. */
3286 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3287 return rc;
3288
3289 /* Enable access to flash interface */
3290 bnx2_enable_nvram_access(bp);
3291
3292 len32 = buf_size;
3293 offset32 = offset;
3294 extra = 0;
3295
3296 cmd_flags = 0;
3297
3298 if (offset32 & 3) {
3299 u8 buf[4];
3300 u32 pre_len;
3301
3302 offset32 &= ~3;
3303 pre_len = 4 - (offset & 3);
3304
3305 if (pre_len >= len32) {
3306 pre_len = len32;
3307 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3308 BNX2_NVM_COMMAND_LAST;
3309 }
3310 else {
3311 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3312 }
3313
3314 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3315
3316 if (rc)
3317 return rc;
3318
3319 memcpy(ret_buf, buf + (offset & 3), pre_len);
3320
3321 offset32 += 4;
3322 ret_buf += pre_len;
3323 len32 -= pre_len;
3324 }
3325 if (len32 & 3) {
3326 extra = 4 - (len32 & 3);
3327 len32 = (len32 + 4) & ~3;
3328 }
3329
3330 if (len32 == 4) {
3331 u8 buf[4];
3332
3333 if (cmd_flags)
3334 cmd_flags = BNX2_NVM_COMMAND_LAST;
3335 else
3336 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3337 BNX2_NVM_COMMAND_LAST;
3338
3339 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3340
3341 memcpy(ret_buf, buf, 4 - extra);
3342 }
3343 else if (len32 > 0) {
3344 u8 buf[4];
3345
3346 /* Read the first word. */
3347 if (cmd_flags)
3348 cmd_flags = 0;
3349 else
3350 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3351
3352 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3353
3354 /* Advance to the next dword. */
3355 offset32 += 4;
3356 ret_buf += 4;
3357 len32 -= 4;
3358
3359 while (len32 > 4 && rc == 0) {
3360 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3361
3362 /* Advance to the next dword. */
3363 offset32 += 4;
3364 ret_buf += 4;
3365 len32 -= 4;
3366 }
3367
3368 if (rc)
3369 return rc;
3370
3371 cmd_flags = BNX2_NVM_COMMAND_LAST;
3372 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3373
3374 memcpy(ret_buf, buf, 4 - extra);
3375 }
3376
3377 /* Disable access to flash interface */
3378 bnx2_disable_nvram_access(bp);
3379
3380 bnx2_release_nvram_lock(bp);
3381
3382 return rc;
3383}
3384
3385static int
3386bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3387 int buf_size)
3388{
3389 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003390 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003391 int rc = 0;
3392 int align_start, align_end;
3393
3394 buf = data_buf;
3395 offset32 = offset;
3396 len32 = buf_size;
3397 align_start = align_end = 0;
3398
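	/* NVRAM writes are done in whole dwords.  An unaligned head or tail
	 * becomes a read-modify-write: the bordering dwords are read into
	 * start[]/end[] and merged with the caller's data in align_buf.
	 */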
3399 if ((align_start = (offset32 & 3))) {
3400 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003401 len32 += align_start;
3402 if (len32 < 4)
3403 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003404 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3405 return rc;
3406 }
3407
3408 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003409 align_end = 4 - (len32 & 3);
3410 len32 += align_end;
3411 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3412 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003413 }
3414
3415 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003416 align_buf = kmalloc(len32, GFP_KERNEL);
3417 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003418 return -ENOMEM;
3419 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003420 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003421 }
3422 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003423 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003424 }
Michael Chane6be7632007-01-08 19:56:13 -08003425 memcpy(align_buf + align_start, data_buf, buf_size);
3426 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003427 }
3428
Michael Chanae181bc2006-05-22 16:39:20 -07003429 if (bp->flash_info->buffered == 0) {
3430 flash_buffer = kmalloc(264, GFP_KERNEL);
3431 if (flash_buffer == NULL) {
3432 rc = -ENOMEM;
3433 goto nvram_write_end;
3434 }
3435 }
3436
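	/* Write one flash page per loop iteration.  For non-buffered flash
	 * the whole page is first read into flash_buffer and the page is
	 * erased, then the old data outside the requested range is written
	 * back around the new data.
	 */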
Michael Chanb6016b72005-05-26 13:03:09 -07003437 written = 0;
3438 while ((written < len32) && (rc == 0)) {
3439 u32 page_start, page_end, data_start, data_end;
3440 u32 addr, cmd_flags;
3441 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003442
3443 /* Find the page_start addr */
3444 page_start = offset32 + written;
3445 page_start -= (page_start % bp->flash_info->page_size);
3446 /* Find the page_end addr */
3447 page_end = page_start + bp->flash_info->page_size;
3448 /* Find the data_start addr */
3449 data_start = (written == 0) ? offset32 : page_start;
3450 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003451 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003452 (offset32 + len32) : page_end;
3453
3454 /* Request access to the flash interface. */
3455 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3456 goto nvram_write_end;
3457
3458 /* Enable access to flash interface */
3459 bnx2_enable_nvram_access(bp);
3460
3461 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3462 if (bp->flash_info->buffered == 0) {
3463 int j;
3464
3465 /* Read the whole page into the buffer
3466			 * (non-buffered flash only) */
3467 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3468 if (j == (bp->flash_info->page_size - 4)) {
3469 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3470 }
3471 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003472 page_start + j,
3473 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003474 cmd_flags);
3475
3476 if (rc)
3477 goto nvram_write_end;
3478
3479 cmd_flags = 0;
3480 }
3481 }
3482
3483 /* Enable writes to flash interface (unlock write-protect) */
3484 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3485 goto nvram_write_end;
3486
Michael Chanb6016b72005-05-26 13:03:09 -07003487 /* Loop to write back the buffer data from page_start to
3488 * data_start */
3489 i = 0;
3490 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003491 /* Erase the page */
3492 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3493 goto nvram_write_end;
3494
3495 /* Re-enable the write again for the actual write */
3496 bnx2_enable_nvram_write(bp);
3497
Michael Chanb6016b72005-05-26 13:03:09 -07003498 for (addr = page_start; addr < data_start;
3499 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003500
Michael Chanb6016b72005-05-26 13:03:09 -07003501 rc = bnx2_nvram_write_dword(bp, addr,
3502 &flash_buffer[i], cmd_flags);
3503
3504 if (rc != 0)
3505 goto nvram_write_end;
3506
3507 cmd_flags = 0;
3508 }
3509 }
3510
3511 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003512 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003513 if ((addr == page_end - 4) ||
3514 ((bp->flash_info->buffered) &&
3515 (addr == data_end - 4))) {
3516
3517 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3518 }
3519 rc = bnx2_nvram_write_dword(bp, addr, buf,
3520 cmd_flags);
3521
3522 if (rc != 0)
3523 goto nvram_write_end;
3524
3525 cmd_flags = 0;
3526 buf += 4;
3527 }
3528
3529 /* Loop to write back the buffer data from data_end
3530 * to page_end */
3531 if (bp->flash_info->buffered == 0) {
3532 for (addr = data_end; addr < page_end;
3533 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003534
Michael Chanb6016b72005-05-26 13:03:09 -07003535 if (addr == page_end-4) {
3536 cmd_flags = BNX2_NVM_COMMAND_LAST;
3537 }
3538 rc = bnx2_nvram_write_dword(bp, addr,
3539 &flash_buffer[i], cmd_flags);
3540
3541 if (rc != 0)
3542 goto nvram_write_end;
3543
3544 cmd_flags = 0;
3545 }
3546 }
3547
3548 /* Disable writes to flash interface (lock write-protect) */
3549 bnx2_disable_nvram_write(bp);
3550
3551 /* Disable access to flash interface */
3552 bnx2_disable_nvram_access(bp);
3553 bnx2_release_nvram_lock(bp);
3554
3555 /* Increment written */
3556 written += data_end - data_start;
3557 }
3558
3559nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003560 kfree(flash_buffer);
3561 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003562 return rc;
3563}
3564
3565static int
3566bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3567{
3568 u32 val;
3569 int i, rc = 0;
3570
3571 /* Wait for the current PCI transaction to complete before
3572 * issuing a reset. */
3573 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3574 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3575 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3576 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3577 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3578 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3579 udelay(5);
3580
Michael Chanb090ae22006-01-23 16:07:10 -08003581 /* Wait for the firmware to tell us it is ok to issue a reset. */
3582 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3583
Michael Chanb6016b72005-05-26 13:03:09 -07003584 /* Deposit a driver reset signature so the firmware knows that
3585 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003586 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003587 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3588
Michael Chanb6016b72005-05-26 13:03:09 -07003589	/* Do a dummy read to force the chip to complete all current transactions
3590 * before we issue a reset. */
3591 val = REG_RD(bp, BNX2_MISC_ID);
3592
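	/* The 5709 is reset through the MISC_COMMAND register.  Older chips
	 * use the core reset request bit in PCICFG_MISC_CONFIG and are
	 * polled below until the reset bits clear.
	 */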
Michael Chan234754d2006-11-19 14:11:41 -08003593 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3594 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3595 REG_RD(bp, BNX2_MISC_COMMAND);
3596 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003597
Michael Chan234754d2006-11-19 14:11:41 -08003598 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3599 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003600
Michael Chan234754d2006-11-19 14:11:41 -08003601 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003602
Michael Chan234754d2006-11-19 14:11:41 -08003603 } else {
3604 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3605 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3606 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3607
3608 /* Chip reset. */
3609 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3610
3611 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3612 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3613 current->state = TASK_UNINTERRUPTIBLE;
3614 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003615 }
Michael Chanb6016b72005-05-26 13:03:09 -07003616
Michael Chan234754d2006-11-19 14:11:41 -08003617		/* Reset takes approximately 30 usec */
3618 for (i = 0; i < 10; i++) {
3619 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3620 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3621 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3622 break;
3623 udelay(10);
3624 }
3625
3626 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3627 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3628 printk(KERN_ERR PFX "Chip reset did not complete\n");
3629 return -EBUSY;
3630 }
Michael Chanb6016b72005-05-26 13:03:09 -07003631 }
3632
3633 /* Make sure byte swapping is properly configured. */
3634 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3635 if (val != 0x01020304) {
3636 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3637 return -ENODEV;
3638 }
3639
Michael Chanb6016b72005-05-26 13:03:09 -07003640 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003641 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3642 if (rc)
3643 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003644
3645 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3646		/* Adjust the voltage regulator to two steps lower. The default
3647 * of this register is 0x0000000e. */
3648 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3649
3650 /* Remove bad rbuf memory from the free pool. */
3651 rc = bnx2_alloc_bad_rbuf(bp);
3652 }
3653
3654 return rc;
3655}
3656
3657static int
3658bnx2_init_chip(struct bnx2 *bp)
3659{
3660 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003661 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003662
3663 /* Make sure the interrupt is not active. */
3664 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3665
3666 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3667 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3668#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003669 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003670#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003671 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003672 DMA_READ_CHANS << 12 |
3673 DMA_WRITE_CHANS << 16;
3674
3675 val |= (0x2 << 20) | (1 << 11);
3676
Michael Chandda1e392006-01-23 16:08:14 -08003677 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003678 val |= (1 << 23);
3679
3680 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3681 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3682 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3683
3684 REG_WR(bp, BNX2_DMA_CONFIG, val);
3685
3686 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3687 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3688 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3689 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3690 }
3691
3692 if (bp->flags & PCIX_FLAG) {
3693 u16 val16;
3694
3695 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3696 &val16);
3697 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3698 val16 & ~PCI_X_CMD_ERO);
3699 }
3700
3701 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3702 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3703 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3704 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3705
3706 /* Initialize context mapping and zero out the quick contexts. The
3707 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07003708 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3709 rc = bnx2_init_5709_context(bp);
3710 if (rc)
3711 return rc;
3712 } else
Michael Chan59b47d82006-11-19 14:10:45 -08003713 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003714
Michael Chanfba9fe92006-06-12 22:21:25 -07003715 if ((rc = bnx2_init_cpus(bp)) != 0)
3716 return rc;
3717
Michael Chanb6016b72005-05-26 13:03:09 -07003718 bnx2_init_nvram(bp);
3719
3720 bnx2_set_mac_addr(bp);
3721
3722 val = REG_RD(bp, BNX2_MQ_CONFIG);
3723 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3724 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003725 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3726 val |= BNX2_MQ_CONFIG_HALT_DIS;
3727
Michael Chanb6016b72005-05-26 13:03:09 -07003728 REG_WR(bp, BNX2_MQ_CONFIG, val);
3729
3730 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3731 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3732 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3733
3734 val = (BCM_PAGE_BITS - 8) << 24;
3735 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3736
3737 /* Configure page size. */
3738 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3739 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3740 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3741 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3742
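	/* Seed the EMAC backoff generator with a value folded from the
	 * station MAC address.
	 */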
3743 val = bp->mac_addr[0] +
3744 (bp->mac_addr[1] << 8) +
3745 (bp->mac_addr[2] << 16) +
3746 bp->mac_addr[3] +
3747 (bp->mac_addr[4] << 8) +
3748 (bp->mac_addr[5] << 16);
3749 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3750
3751 /* Program the MTU. Also include 4 bytes for CRC32. */
3752 val = bp->dev->mtu + ETH_HLEN + 4;
3753 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3754 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3755 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3756
3757 bp->last_status_idx = 0;
3758 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3759
3760 /* Set up how to generate a link change interrupt. */
3761 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3762
3763 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3764 (u64) bp->status_blk_mapping & 0xffffffff);
3765 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3766
3767 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3768 (u64) bp->stats_blk_mapping & 0xffffffff);
3769 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3770 (u64) bp->stats_blk_mapping >> 32);
3771
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003772 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003773 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3774
3775 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3776 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3777
3778 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3779 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3780
3781 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3782
3783 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3784
3785 REG_WR(bp, BNX2_HC_COM_TICKS,
3786 (bp->com_ticks_int << 16) | bp->com_ticks);
3787
3788 REG_WR(bp, BNX2_HC_CMD_TICKS,
3789 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3790
3791 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3792 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3793
3794 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07003795 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003796 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07003797 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3798 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003799 }
3800
Michael Chan8e6a72c2007-05-03 13:24:48 -07003801 if (bp->flags & ONE_SHOT_MSI_FLAG)
3802 val |= BNX2_HC_CONFIG_ONE_SHOT;
3803
3804 REG_WR(bp, BNX2_HC_CONFIG, val);
3805
Michael Chanb6016b72005-05-26 13:03:09 -07003806 /* Clear internal stats counters. */
3807 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3808
Michael Chanda3e4fb2007-05-03 13:24:23 -07003809 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07003810
Michael Chane29054f2006-01-23 16:06:06 -08003811 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3812 BNX2_PORT_FEATURE_ASF_ENABLED)
3813 bp->flags |= ASF_ENABLE_FLAG;
3814
Michael Chanb6016b72005-05-26 13:03:09 -07003815 /* Initialize the receive filter. */
3816 bnx2_set_rx_mode(bp->dev);
3817
Michael Chanb090ae22006-01-23 16:07:10 -08003818 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3819 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003820
3821 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3822 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3823
3824 udelay(20);
3825
Michael Chanbf5295b2006-03-23 01:11:56 -08003826 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3827
Michael Chanb090ae22006-01-23 16:07:10 -08003828 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003829}
3830
Michael Chan59b47d82006-11-19 14:10:45 -08003831static void
3832bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3833{
3834 u32 val, offset0, offset1, offset2, offset3;
3835
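	/* The 5709 uses a different L2 context layout, so the TX context
	 * fields live at different offsets than on the 5706/5708.
	 */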
3836 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3837 offset0 = BNX2_L2CTX_TYPE_XI;
3838 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3839 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3840 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3841 } else {
3842 offset0 = BNX2_L2CTX_TYPE;
3843 offset1 = BNX2_L2CTX_CMD_TYPE;
3844 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3845 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3846 }
3847 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3848 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3849
3850 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3851 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3852
3853 val = (u64) bp->tx_desc_mapping >> 32;
3854 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3855
3856 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3857 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3858}
Michael Chanb6016b72005-05-26 13:03:09 -07003859
3860static void
3861bnx2_init_tx_ring(struct bnx2 *bp)
3862{
3863 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003864 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003865
Michael Chan2f8af122006-08-15 01:39:10 -07003866 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3867
Michael Chanb6016b72005-05-26 13:03:09 -07003868 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003869
Michael Chanb6016b72005-05-26 13:03:09 -07003870 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3871 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3872
3873 bp->tx_prod = 0;
3874 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003875 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003876 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003877
Michael Chan59b47d82006-11-19 14:10:45 -08003878 cid = TX_CID;
3879 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3880 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003881
Michael Chan59b47d82006-11-19 14:10:45 -08003882 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003883}
3884
3885static void
3886bnx2_init_rx_ring(struct bnx2 *bp)
3887{
3888 struct rx_bd *rxbd;
3889 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003890 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003891 u32 val;
3892
3893 /* 8 for CRC and VLAN */
3894 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003895 /* hw alignment */
3896 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003897
3898 ring_prod = prod = bp->rx_prod = 0;
3899 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003900 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003901 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003902
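	/* Chain the RX BD pages into a ring: the last descriptor of each
	 * page points to the next page, and the final page wraps back to
	 * page 0.
	 */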
Michael Chan13daffa2006-03-20 17:49:20 -08003903 for (i = 0; i < bp->rx_max_ring; i++) {
3904 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003905
Michael Chan13daffa2006-03-20 17:49:20 -08003906 rxbd = &bp->rx_desc_ring[i][0];
3907 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3908 rxbd->rx_bd_len = bp->rx_buf_use_size;
3909 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3910 }
3911 if (i == (bp->rx_max_ring - 1))
3912 j = 0;
3913 else
3914 j = i + 1;
3915 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3916 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3917 0xffffffff;
3918 }
Michael Chanb6016b72005-05-26 13:03:09 -07003919
3920 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3921 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3922 val |= 0x02 << 8;
3923 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3924
Michael Chan13daffa2006-03-20 17:49:20 -08003925 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003926 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3927
Michael Chan13daffa2006-03-20 17:49:20 -08003928 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003929 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3930
Michael Chan236b6392006-03-20 17:49:02 -08003931 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003932 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3933 break;
3934 }
3935 prod = NEXT_RX_BD(prod);
3936 ring_prod = RX_RING_IDX(prod);
3937 }
3938 bp->rx_prod = prod;
3939
3940 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3941
3942 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3943}
3944
3945static void
Michael Chan13daffa2006-03-20 17:49:20 -08003946bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3947{
3948 u32 num_rings, max;
3949
3950 bp->rx_ring_size = size;
3951 num_rings = 1;
3952 while (size > MAX_RX_DESC_CNT) {
3953 size -= MAX_RX_DESC_CNT;
3954 num_rings++;
3955 }
3956 /* round to next power of 2 */
3957 max = MAX_RX_RINGS;
3958 while ((max & num_rings) == 0)
3959 max >>= 1;
3960
3961 if (num_rings != max)
3962 max <<= 1;
3963
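	/* e.g. a requested size that spans 3 pages is rounded up to 4,
	 * the next power of two. */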
3964 bp->rx_max_ring = max;
3965 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3966}
3967
3968static void
Michael Chanb6016b72005-05-26 13:03:09 -07003969bnx2_free_tx_skbs(struct bnx2 *bp)
3970{
3971 int i;
3972
3973 if (bp->tx_buf_ring == NULL)
3974 return;
3975
3976 for (i = 0; i < TX_DESC_CNT; ) {
3977 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3978 struct sk_buff *skb = tx_buf->skb;
3979 int j, last;
3980
3981 if (skb == NULL) {
3982 i++;
3983 continue;
3984 }
3985
3986 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3987 skb_headlen(skb), PCI_DMA_TODEVICE);
3988
3989 tx_buf->skb = NULL;
3990
3991 last = skb_shinfo(skb)->nr_frags;
3992 for (j = 0; j < last; j++) {
3993 tx_buf = &bp->tx_buf_ring[i + j + 1];
3994 pci_unmap_page(bp->pdev,
3995 pci_unmap_addr(tx_buf, mapping),
3996 skb_shinfo(skb)->frags[j].size,
3997 PCI_DMA_TODEVICE);
3998 }
Michael Chan745720e2006-06-29 12:37:41 -07003999 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004000 i += j + 1;
4001 }
4002
4003}
4004
4005static void
4006bnx2_free_rx_skbs(struct bnx2 *bp)
4007{
4008 int i;
4009
4010 if (bp->rx_buf_ring == NULL)
4011 return;
4012
Michael Chan13daffa2006-03-20 17:49:20 -08004013 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004014 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4015 struct sk_buff *skb = rx_buf->skb;
4016
Michael Chan05d0f1c2005-11-04 08:53:48 -08004017 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004018 continue;
4019
4020 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4021 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4022
4023 rx_buf->skb = NULL;
4024
Michael Chan745720e2006-06-29 12:37:41 -07004025 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004026 }
4027}
4028
4029static void
4030bnx2_free_skbs(struct bnx2 *bp)
4031{
4032 bnx2_free_tx_skbs(bp);
4033 bnx2_free_rx_skbs(bp);
4034}
4035
4036static int
4037bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4038{
4039 int rc;
4040
4041 rc = bnx2_reset_chip(bp, reset_code);
4042 bnx2_free_skbs(bp);
4043 if (rc)
4044 return rc;
4045
Michael Chanfba9fe92006-06-12 22:21:25 -07004046 if ((rc = bnx2_init_chip(bp)) != 0)
4047 return rc;
4048
Michael Chanb6016b72005-05-26 13:03:09 -07004049 bnx2_init_tx_ring(bp);
4050 bnx2_init_rx_ring(bp);
4051 return 0;
4052}
4053
4054static int
4055bnx2_init_nic(struct bnx2 *bp)
4056{
4057 int rc;
4058
4059 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4060 return rc;
4061
Michael Chan80be4432006-11-19 14:07:28 -08004062 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004063 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004064 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004065 bnx2_set_link(bp);
4066 return 0;
4067}
4068
4069static int
4070bnx2_test_registers(struct bnx2 *bp)
4071{
4072 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004073 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004074 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004075 u16 offset;
4076 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004077#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004078 u32 rw_mask;
4079 u32 ro_mask;
4080 } reg_tbl[] = {
4081 { 0x006c, 0, 0x00000000, 0x0000003f },
4082 { 0x0090, 0, 0xffffffff, 0x00000000 },
4083 { 0x0094, 0, 0x00000000, 0x00000000 },
4084
Michael Chan5bae30c2007-05-03 13:18:46 -07004085 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4086 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4087 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4088 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4089 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4090 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4091 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4092 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4093 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004094
Michael Chan5bae30c2007-05-03 13:18:46 -07004095 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4096 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4097 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4098 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4099 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4100 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004101
Michael Chan5bae30c2007-05-03 13:18:46 -07004102 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4103 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4104 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004105
4106 { 0x1000, 0, 0x00000000, 0x00000001 },
4107 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004108
4109 { 0x1408, 0, 0x01c00800, 0x00000000 },
4110 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4111 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004112 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004113 { 0x14b0, 0, 0x00000002, 0x00000001 },
4114 { 0x14b8, 0, 0x00000000, 0x00000000 },
4115 { 0x14c0, 0, 0x00000000, 0x00000009 },
4116 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4117 { 0x14cc, 0, 0x00000000, 0x00000001 },
4118 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004119
4120 { 0x1800, 0, 0x00000000, 0x00000001 },
4121 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004122
4123 { 0x2800, 0, 0x00000000, 0x00000001 },
4124 { 0x2804, 0, 0x00000000, 0x00003f01 },
4125 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4126 { 0x2810, 0, 0xffff0000, 0x00000000 },
4127 { 0x2814, 0, 0xffff0000, 0x00000000 },
4128 { 0x2818, 0, 0xffff0000, 0x00000000 },
4129 { 0x281c, 0, 0xffff0000, 0x00000000 },
4130 { 0x2834, 0, 0xffffffff, 0x00000000 },
4131 { 0x2840, 0, 0x00000000, 0xffffffff },
4132 { 0x2844, 0, 0x00000000, 0xffffffff },
4133 { 0x2848, 0, 0xffffffff, 0x00000000 },
4134 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4135
4136 { 0x2c00, 0, 0x00000000, 0x00000011 },
4137 { 0x2c04, 0, 0x00000000, 0x00030007 },
4138
Michael Chanb6016b72005-05-26 13:03:09 -07004139 { 0x3c00, 0, 0x00000000, 0x00000001 },
4140 { 0x3c04, 0, 0x00000000, 0x00070000 },
4141 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4142 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4143 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4144 { 0x3c14, 0, 0x00000000, 0xffffffff },
4145 { 0x3c18, 0, 0x00000000, 0xffffffff },
4146 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4147 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004148
4149 { 0x5004, 0, 0x00000000, 0x0000007f },
4150 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004151
Michael Chanb6016b72005-05-26 13:03:09 -07004152 { 0x5c00, 0, 0x00000000, 0x00000001 },
4153 { 0x5c04, 0, 0x00000000, 0x0003000f },
4154 { 0x5c08, 0, 0x00000003, 0x00000000 },
4155 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4156 { 0x5c10, 0, 0x00000000, 0xffffffff },
4157 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4158 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4159 { 0x5c88, 0, 0x00000000, 0x00077373 },
4160 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4161
4162 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4163 { 0x680c, 0, 0xffffffff, 0x00000000 },
4164 { 0x6810, 0, 0xffffffff, 0x00000000 },
4165 { 0x6814, 0, 0xffffffff, 0x00000000 },
4166 { 0x6818, 0, 0xffffffff, 0x00000000 },
4167 { 0x681c, 0, 0xffffffff, 0x00000000 },
4168 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4169 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4170 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4171 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4172 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4173 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4174 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4175 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4176 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4177 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4178 { 0x684c, 0, 0xffffffff, 0x00000000 },
4179 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4180 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4181 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4182 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4183 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4184 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4185
4186 { 0xffff, 0, 0x00000000, 0x00000000 },
4187 };
4188
4189 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004190 is_5709 = 0;
4191 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4192 is_5709 = 1;
4193
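	/* For each entry, write 0 and then all-ones to the register: bits in
	 * rw_mask must read back exactly as written, while bits in ro_mask
	 * must keep the value saved before the test.  The original value is
	 * restored before moving on.
	 */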
Michael Chanb6016b72005-05-26 13:03:09 -07004194 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4195 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004196 u16 flags = reg_tbl[i].flags;
4197
4198 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4199 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004200
4201 offset = (u32) reg_tbl[i].offset;
4202 rw_mask = reg_tbl[i].rw_mask;
4203 ro_mask = reg_tbl[i].ro_mask;
4204
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004205 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004206
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004207 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004208
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004209 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004210 if ((val & rw_mask) != 0) {
4211 goto reg_test_err;
4212 }
4213
4214 if ((val & ro_mask) != (save_val & ro_mask)) {
4215 goto reg_test_err;
4216 }
4217
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004218 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004219
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004220 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004221 if ((val & rw_mask) != rw_mask) {
4222 goto reg_test_err;
4223 }
4224
4225 if ((val & ro_mask) != (save_val & ro_mask)) {
4226 goto reg_test_err;
4227 }
4228
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004229 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004230 continue;
4231
4232reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004233 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004234 ret = -ENODEV;
4235 break;
4236 }
4237 return ret;
4238}
4239
4240static int
4241bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4242{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004243 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004244 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4245 int i;
4246
4247 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4248 u32 offset;
4249
4250 for (offset = 0; offset < size; offset += 4) {
4251
4252 REG_WR_IND(bp, start + offset, test_pattern[i]);
4253
4254 if (REG_RD_IND(bp, start + offset) !=
4255 test_pattern[i]) {
4256 return -ENODEV;
4257 }
4258 }
4259 }
4260 return 0;
4261}
4262
4263static int
4264bnx2_test_memory(struct bnx2 *bp)
4265{
4266 int ret = 0;
4267 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004268 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004269 u32 offset;
4270 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004271 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004272 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004273 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004274 { 0xe0000, 0x4000 },
4275 { 0x120000, 0x4000 },
4276 { 0x1a0000, 0x4000 },
4277 { 0x160000, 0x4000 },
4278 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004279 },
4280 mem_tbl_5709[] = {
4281 { 0x60000, 0x4000 },
4282 { 0xa0000, 0x3000 },
4283 { 0xe0000, 0x4000 },
4284 { 0x120000, 0x4000 },
4285 { 0x1a0000, 0x4000 },
4286 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004287 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004288 struct mem_entry *mem_tbl;
4289
4290 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4291 mem_tbl = mem_tbl_5709;
4292 else
4293 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004294
4295 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4296 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4297 mem_tbl[i].len)) != 0) {
4298 return ret;
4299 }
4300 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004301
Michael Chanb6016b72005-05-26 13:03:09 -07004302 return ret;
4303}
4304
Michael Chanbc5a0692006-01-23 16:13:22 -08004305#define BNX2_MAC_LOOPBACK 0
4306#define BNX2_PHY_LOOPBACK 1
4307
Michael Chanb6016b72005-05-26 13:03:09 -07004308static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004309bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004310{
4311 unsigned int pkt_size, num_pkts, i;
4312 struct sk_buff *skb, *rx_skb;
4313 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004314 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004315 dma_addr_t map;
4316 struct tx_bd *txbd;
4317 struct sw_bd *rx_buf;
4318 struct l2_fhdr *rx_hdr;
4319 int ret = -ENODEV;
4320
Michael Chanbc5a0692006-01-23 16:13:22 -08004321 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4322 bp->loopback = MAC_LOOPBACK;
4323 bnx2_set_mac_loopback(bp);
4324 }
4325 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004326 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004327 bnx2_set_phy_loopback(bp);
4328 }
4329 else
4330 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004331
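	/* Build a frame addressed to our own MAC, queue it as a single TX
	 * BD, kick the host coalescing block, and then verify that exactly
	 * one packet arrived on the RX ring with an intact payload.
	 */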
4332 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004333 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004334 if (!skb)
4335 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004336 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004337 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004338 memset(packet + 6, 0x0, 8);
4339 for (i = 14; i < pkt_size; i++)
4340 packet[i] = (unsigned char) (i & 0xff);
4341
4342 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4343 PCI_DMA_TODEVICE);
4344
Michael Chanbf5295b2006-03-23 01:11:56 -08004345 REG_WR(bp, BNX2_HC_COMMAND,
4346 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4347
Michael Chanb6016b72005-05-26 13:03:09 -07004348 REG_RD(bp, BNX2_HC_COMMAND);
4349
4350 udelay(5);
4351 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4352
Michael Chanb6016b72005-05-26 13:03:09 -07004353 num_pkts = 0;
4354
Michael Chanbc5a0692006-01-23 16:13:22 -08004355 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004356
4357 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4358 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4359 txbd->tx_bd_mss_nbytes = pkt_size;
4360 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4361
4362 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004363 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4364 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004365
Michael Chan234754d2006-11-19 14:11:41 -08004366 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4367 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004368
4369 udelay(100);
4370
Michael Chanbf5295b2006-03-23 01:11:56 -08004371 REG_WR(bp, BNX2_HC_COMMAND,
4372 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4373
Michael Chanb6016b72005-05-26 13:03:09 -07004374 REG_RD(bp, BNX2_HC_COMMAND);
4375
4376 udelay(5);
4377
4378 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004379 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004380
Michael Chanbc5a0692006-01-23 16:13:22 -08004381 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004382 goto loopback_test_done;
4383 }
4384
4385 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4386 if (rx_idx != rx_start_idx + num_pkts) {
4387 goto loopback_test_done;
4388 }
4389
4390 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4391 rx_skb = rx_buf->skb;
4392
4393 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4394 skb_reserve(rx_skb, bp->rx_offset);
4395
4396 pci_dma_sync_single_for_cpu(bp->pdev,
4397 pci_unmap_addr(rx_buf, mapping),
4398 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4399
Michael Chanade2bfe2006-01-23 16:09:51 -08004400 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004401 (L2_FHDR_ERRORS_BAD_CRC |
4402 L2_FHDR_ERRORS_PHY_DECODE |
4403 L2_FHDR_ERRORS_ALIGNMENT |
4404 L2_FHDR_ERRORS_TOO_SHORT |
4405 L2_FHDR_ERRORS_GIANT_FRAME)) {
4406
4407 goto loopback_test_done;
4408 }
4409
4410 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4411 goto loopback_test_done;
4412 }
4413
4414 for (i = 14; i < pkt_size; i++) {
4415 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4416 goto loopback_test_done;
4417 }
4418 }
4419
4420 ret = 0;
4421
4422loopback_test_done:
4423 bp->loopback = 0;
4424 return ret;
4425}
4426
Michael Chanbc5a0692006-01-23 16:13:22 -08004427#define BNX2_MAC_LOOPBACK_FAILED 1
4428#define BNX2_PHY_LOOPBACK_FAILED 2
4429#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4430 BNX2_PHY_LOOPBACK_FAILED)
4431
4432static int
4433bnx2_test_loopback(struct bnx2 *bp)
4434{
4435 int rc = 0;
4436
4437 if (!netif_running(bp->dev))
4438 return BNX2_LOOPBACK_FAILED;
4439
4440 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4441 spin_lock_bh(&bp->phy_lock);
4442 bnx2_init_phy(bp);
4443 spin_unlock_bh(&bp->phy_lock);
4444 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4445 rc |= BNX2_MAC_LOOPBACK_FAILED;
4446 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4447 rc |= BNX2_PHY_LOOPBACK_FAILED;
4448 return rc;
4449}
4450
Michael Chanb6016b72005-05-26 13:03:09 -07004451#define NVRAM_SIZE 0x200
4452#define CRC32_RESIDUAL 0xdebb20e3
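/* Running ether_crc_le() over a block that ends with its own CRC32 yields
 * this fixed residual, so the test below can validate a block without
 * extracting and comparing the stored checksum explicitly.
 */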
4453
4454static int
4455bnx2_test_nvram(struct bnx2 *bp)
4456{
4457 u32 buf[NVRAM_SIZE / 4];
4458 u8 *data = (u8 *) buf;
4459 int rc = 0;
4460 u32 magic, csum;
4461
4462 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4463 goto test_nvram_done;
4464
4465 magic = be32_to_cpu(buf[0]);
4466 if (magic != 0x669955aa) {
4467 rc = -ENODEV;
4468 goto test_nvram_done;
4469 }
4470
4471 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4472 goto test_nvram_done;
4473
4474 csum = ether_crc_le(0x100, data);
4475 if (csum != CRC32_RESIDUAL) {
4476 rc = -ENODEV;
4477 goto test_nvram_done;
4478 }
4479
4480 csum = ether_crc_le(0x100, data + 0x100);
4481 if (csum != CRC32_RESIDUAL) {
4482 rc = -ENODEV;
4483 }
4484
4485test_nvram_done:
4486 return rc;
4487}
4488
4489static int
4490bnx2_test_link(struct bnx2 *bp)
4491{
4492 u32 bmsr;
4493
Michael Chanc770a652005-08-25 15:38:39 -07004494 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004495 bnx2_enable_bmsr1(bp);
4496 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4497 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4498 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004499 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004500
Michael Chanb6016b72005-05-26 13:03:09 -07004501 if (bmsr & BMSR_LSTATUS) {
4502 return 0;
4503 }
4504 return -ENODEV;
4505}
4506
4507static int
4508bnx2_test_intr(struct bnx2 *bp)
4509{
4510 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004511 u16 status_idx;
4512
4513 if (!netif_running(bp->dev))
4514 return -ENODEV;
4515
4516 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4517
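	/* Force the host coalescing block to generate an interrupt now.  The
	 * loop below assumes the interrupt handler acknowledges it by writing
	 * an updated status index to BNX2_PCICFG_INT_ACK_CMD.
	 */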
4518 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004519 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004520 REG_RD(bp, BNX2_HC_COMMAND);
4521
4522 for (i = 0; i < 10; i++) {
4523 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4524 status_idx) {
4525
4526 break;
4527 }
4528
4529 msleep_interruptible(10);
4530 }
4531 if (i < 10)
4532 return 0;
4533
4534 return -ENODEV;
4535}
4536
4537static void
Michael Chan48b01e22006-11-19 14:08:00 -08004538bnx2_5706_serdes_timer(struct bnx2 *bp)
4539{
4540 spin_lock(&bp->phy_lock);
4541 if (bp->serdes_an_pending)
4542 bp->serdes_an_pending--;
4543 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4544 u32 bmcr;
4545
4546 bp->current_interval = bp->timer_interval;
4547
Michael Chanca58c3a2007-05-03 13:22:52 -07004548 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004549
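		/* Parallel detection: if signal is present but no config
		 * words are being received, the link partner is not
		 * autonegotiating, so drop out of autoneg and force
		 * 1000 Mb/s full duplex.
		 */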
4550 if (bmcr & BMCR_ANENABLE) {
4551 u32 phy1, phy2;
4552
4553 bnx2_write_phy(bp, 0x1c, 0x7c00);
4554 bnx2_read_phy(bp, 0x1c, &phy1);
4555
4556 bnx2_write_phy(bp, 0x17, 0x0f01);
4557 bnx2_read_phy(bp, 0x15, &phy2);
4558 bnx2_write_phy(bp, 0x17, 0x0f01);
4559 bnx2_read_phy(bp, 0x15, &phy2);
4560
4561 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4562 !(phy2 & 0x20)) { /* no CONFIG */
4563
4564 bmcr &= ~BMCR_ANENABLE;
4565 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004566 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004567 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4568 }
4569 }
4570 }
4571 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4572 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4573 u32 phy2;
4574
4575 bnx2_write_phy(bp, 0x17, 0x0f01);
4576 bnx2_read_phy(bp, 0x15, &phy2);
4577 if (phy2 & 0x20) {
4578 u32 bmcr;
4579
Michael Chanca58c3a2007-05-03 13:22:52 -07004580 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004581 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004582 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004583
4584 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4585 }
4586 } else
4587 bp->current_interval = bp->timer_interval;
4588
4589 spin_unlock(&bp->phy_lock);
4590}
4591
4592static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004593bnx2_5708_serdes_timer(struct bnx2 *bp)
4594{
4595 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4596 bp->serdes_an_pending = 0;
4597 return;
4598 }
4599
4600 spin_lock(&bp->phy_lock);
4601 if (bp->serdes_an_pending)
4602 bp->serdes_an_pending--;
4603 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4604 u32 bmcr;
4605
Michael Chanca58c3a2007-05-03 13:22:52 -07004606 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004607 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004608 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004609 bp->current_interval = SERDES_FORCED_TIMEOUT;
4610 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004611 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004612 bp->serdes_an_pending = 2;
4613 bp->current_interval = bp->timer_interval;
4614 }
4615
4616 } else
4617 bp->current_interval = bp->timer_interval;
4618
4619 spin_unlock(&bp->phy_lock);
4620}
4621
4622static void
Michael Chanb6016b72005-05-26 13:03:09 -07004623bnx2_timer(unsigned long data)
4624{
4625 struct bnx2 *bp = (struct bnx2 *) data;
4626 u32 msg;
4627
Michael Chancd339a02005-08-25 15:35:24 -07004628 if (!netif_running(bp->dev))
4629 return;
4630
Michael Chanb6016b72005-05-26 13:03:09 -07004631 if (atomic_read(&bp->intr_sem) != 0)
4632 goto bnx2_restart_timer;
4633
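	/* Write an incrementing pulse sequence number to shared memory so
	 * the firmware can see that the driver is still alive.
	 */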
4634 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004635 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004636
Michael Chancea94db2006-06-12 22:16:13 -07004637 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4638
Michael Chanf8dd0642006-11-19 14:08:29 -08004639 if (bp->phy_flags & PHY_SERDES_FLAG) {
4640 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4641 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004642 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004643 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004644 }
4645
4646bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004647 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004648}
4649
Michael Chan8e6a72c2007-05-03 13:24:48 -07004650static int
4651bnx2_request_irq(struct bnx2 *bp)
4652{
4653 struct net_device *dev = bp->dev;
4654 int rc = 0;
4655
4656 if (bp->flags & USING_MSI_FLAG) {
4657 irq_handler_t fn = bnx2_msi;
4658
4659 if (bp->flags & ONE_SHOT_MSI_FLAG)
4660 fn = bnx2_msi_1shot;
4661
4662 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4663 } else
4664 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4665 IRQF_SHARED, dev->name, dev);
4666 return rc;
4667}
4668
4669static void
4670bnx2_free_irq(struct bnx2 *bp)
4671{
4672 struct net_device *dev = bp->dev;
4673
4674 if (bp->flags & USING_MSI_FLAG) {
4675 free_irq(bp->pdev->irq, dev);
4676 pci_disable_msi(bp->pdev);
4677 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4678 } else
4679 free_irq(bp->pdev->irq, dev);
4680}
4681
Michael Chanb6016b72005-05-26 13:03:09 -07004682/* Called with rtnl_lock */
4683static int
4684bnx2_open(struct net_device *dev)
4685{
Michael Chan972ec0d2006-01-23 16:12:43 -08004686 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004687 int rc;
4688
Michael Chan1b2f9222007-05-03 13:20:19 -07004689 netif_carrier_off(dev);
4690
Pavel Machek829ca9a2005-09-03 15:56:56 -07004691 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004692 bnx2_disable_int(bp);
4693
4694 rc = bnx2_alloc_mem(bp);
4695 if (rc)
4696 return rc;
4697
Michael Chan8e6a72c2007-05-03 13:24:48 -07004698 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07004699 if (pci_enable_msi(bp->pdev) == 0) {
4700 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07004701 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4702 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07004703 }
4704 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07004705 rc = bnx2_request_irq(bp);
4706
Michael Chanb6016b72005-05-26 13:03:09 -07004707 if (rc) {
4708 bnx2_free_mem(bp);
4709 return rc;
4710 }
4711
4712 rc = bnx2_init_nic(bp);
4713
4714 if (rc) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004715 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004716 bnx2_free_skbs(bp);
4717 bnx2_free_mem(bp);
4718 return rc;
4719 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004720
Michael Chancd339a02005-08-25 15:35:24 -07004721 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004722
4723 atomic_set(&bp->intr_sem, 0);
4724
4725 bnx2_enable_int(bp);
4726
4727 if (bp->flags & USING_MSI_FLAG) {
4728		/* Test MSI to make sure it is working.
4729		 * If the MSI test fails, go back to INTx mode.
4730 */
4731 if (bnx2_test_intr(bp) != 0) {
4732 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4733 " using MSI, switching to INTx mode. Please"
4734 " report this failure to the PCI maintainer"
4735 " and include system chipset information.\n",
4736 bp->dev->name);
4737
4738 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07004739 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004740
4741 rc = bnx2_init_nic(bp);
4742
Michael Chan8e6a72c2007-05-03 13:24:48 -07004743 if (!rc)
4744 rc = bnx2_request_irq(bp);
4745
Michael Chanb6016b72005-05-26 13:03:09 -07004746 if (rc) {
4747 bnx2_free_skbs(bp);
4748 bnx2_free_mem(bp);
4749 del_timer_sync(&bp->timer);
4750 return rc;
4751 }
4752 bnx2_enable_int(bp);
4753 }
4754 }
4755 if (bp->flags & USING_MSI_FLAG) {
4756 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4757 }
4758
4759 netif_start_queue(dev);
4760
4761 return 0;
4762}
4763
4764static void
David Howellsc4028952006-11-22 14:57:56 +00004765bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004766{
David Howellsc4028952006-11-22 14:57:56 +00004767 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004768
Michael Chanafdc08b2005-08-25 15:34:29 -07004769 if (!netif_running(bp->dev))
4770 return;
4771
4772 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004773 bnx2_netif_stop(bp);
4774
4775 bnx2_init_nic(bp);
4776
4777 atomic_set(&bp->intr_sem, 1);
4778 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004779 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004780}
4781
4782static void
4783bnx2_tx_timeout(struct net_device *dev)
4784{
Michael Chan972ec0d2006-01-23 16:12:43 -08004785 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004786
4787	/* This allows the netif to be shut down gracefully before resetting */
4788 schedule_work(&bp->reset_task);
4789}
4790
4791#ifdef BCM_VLAN
4792/* Called with rtnl_lock */
4793static void
4794bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4795{
Michael Chan972ec0d2006-01-23 16:12:43 -08004796 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004797
4798 bnx2_netif_stop(bp);
4799
4800 bp->vlgrp = vlgrp;
4801 bnx2_set_rx_mode(dev);
4802
4803 bnx2_netif_start(bp);
4804}
Michael Chanb6016b72005-05-26 13:03:09 -07004805#endif
4806
Herbert Xu932ff272006-06-09 12:20:56 -07004807/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004808 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4809 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004810 */
4811static int
4812bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4813{
Michael Chan972ec0d2006-01-23 16:12:43 -08004814 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004815 dma_addr_t mapping;
4816 struct tx_bd *txbd;
4817 struct sw_bd *tx_buf;
4818 u32 len, vlan_tag_flags, last_frag, mss;
4819 u16 prod, ring_prod;
4820 int i;
4821
Michael Chane89bbf12005-08-25 15:36:58 -07004822 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004823 netif_stop_queue(dev);
4824 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4825 dev->name);
4826
4827 return NETDEV_TX_BUSY;
4828 }
4829 len = skb_headlen(skb);
4830 prod = bp->tx_prod;
4831 ring_prod = TX_RING_IDX(prod);
4832
4833 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004834 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004835 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4836 }
4837
4838 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4839 vlan_tag_flags |=
4840 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4841 }
Michael Chanfde82052007-05-03 17:23:35 -07004842 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07004843 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004844 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004845
Michael Chanb6016b72005-05-26 13:03:09 -07004846 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4847
Michael Chan4666f872007-05-03 13:22:28 -07004848 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004849
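		/* Two LSO paths: for TCPv6, the TCP header offset beyond the
		 * basic IPv6 header and the option length are encoded into
		 * the BD flag/mss fields; for TCPv4, the header lengths are
		 * encoded in the flags and the TCP pseudo-header checksum is
		 * pre-seeded below.
		 */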
Michael Chan4666f872007-05-03 13:22:28 -07004850 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4851 u32 tcp_off = skb_transport_offset(skb) -
4852 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004853
Michael Chan4666f872007-05-03 13:22:28 -07004854 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4855 TX_BD_FLAGS_SW_FLAGS;
4856 if (likely(tcp_off == 0))
4857 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4858 else {
4859 tcp_off >>= 3;
4860 vlan_tag_flags |= ((tcp_off & 0x3) <<
4861 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4862 ((tcp_off & 0x10) <<
4863 TX_BD_FLAGS_TCP6_OFF4_SHL);
4864 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4865 }
4866 } else {
4867 if (skb_header_cloned(skb) &&
4868 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4869 dev_kfree_skb(skb);
4870 return NETDEV_TX_OK;
4871 }
4872
4873 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4874
4875 iph = ip_hdr(skb);
4876 iph->check = 0;
4877 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4878 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4879 iph->daddr, 0,
4880 IPPROTO_TCP,
4881 0);
4882 if (tcp_opt_len || (iph->ihl > 5)) {
4883 vlan_tag_flags |= ((iph->ihl - 5) +
4884 (tcp_opt_len >> 2)) << 8;
4885 }
Michael Chanb6016b72005-05-26 13:03:09 -07004886 }
Michael Chan4666f872007-05-03 13:22:28 -07004887 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004888 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004889
4890 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004891
Michael Chanb6016b72005-05-26 13:03:09 -07004892 tx_buf = &bp->tx_buf_ring[ring_prod];
4893 tx_buf->skb = skb;
4894 pci_unmap_addr_set(tx_buf, mapping, mapping);
4895
4896 txbd = &bp->tx_desc_ring[ring_prod];
4897
4898 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4899 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4900 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4901 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4902
4903 last_frag = skb_shinfo(skb)->nr_frags;
4904
4905 for (i = 0; i < last_frag; i++) {
4906 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4907
4908 prod = NEXT_TX_BD(prod);
4909 ring_prod = TX_RING_IDX(prod);
4910 txbd = &bp->tx_desc_ring[ring_prod];
4911
4912 len = frag->size;
4913 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4914 len, PCI_DMA_TODEVICE);
4915 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4916 mapping, mapping);
4917
4918 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4919 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4920 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4921 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4922
4923 }
4924 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4925
4926 prod = NEXT_TX_BD(prod);
4927 bp->tx_prod_bseq += skb->len;
4928
Michael Chan234754d2006-11-19 14:11:41 -08004929 REG_WR16(bp, bp->tx_bidx_addr, prod);
4930 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004931
4932 mmiowb();
4933
4934 bp->tx_prod = prod;
4935 dev->trans_start = jiffies;
4936
Michael Chane89bbf12005-08-25 15:36:58 -07004937 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004938 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004939 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004940 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004941 }
4942
4943 return NETDEV_TX_OK;
4944}
4945
4946/* Called with rtnl_lock */
4947static int
4948bnx2_close(struct net_device *dev)
4949{
Michael Chan972ec0d2006-01-23 16:12:43 -08004950 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004951 u32 reset_code;
4952
Michael Chanafdc08b2005-08-25 15:34:29 -07004953 /* Calling flush_scheduled_work() may deadlock because
4954 * linkwatch_event() may be on the workqueue and it will try to get
4955	 * the rtnl_lock, which we are holding; poll in_reset_task instead.
4956 */
4957 while (bp->in_reset_task)
4958 msleep(1);
4959
Michael Chanb6016b72005-05-26 13:03:09 -07004960 bnx2_netif_stop(bp);
4961 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004962 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004963 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08004964 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07004965 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4966 else
4967 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4968 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07004969 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004970 bnx2_free_skbs(bp);
4971 bnx2_free_mem(bp);
4972 bp->link_up = 0;
4973 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07004974 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07004975 return 0;
4976}
4977
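/* Hardware counters are kept as 32-bit hi/lo pairs.  On 64-bit builds the
 * two halves are combined into one value; on 32-bit builds only the low
 * word is reported since the net_device_stats fields are unsigned long.
 */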
4978#define GET_NET_STATS64(ctr) \
4979 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4980 (unsigned long) (ctr##_lo)
4981
4982#define GET_NET_STATS32(ctr) \
4983 (ctr##_lo)
4984
4985#if (BITS_PER_LONG == 64)
4986#define GET_NET_STATS GET_NET_STATS64
4987#else
4988#define GET_NET_STATS GET_NET_STATS32
4989#endif
4990
4991static struct net_device_stats *
4992bnx2_get_stats(struct net_device *dev)
4993{
Michael Chan972ec0d2006-01-23 16:12:43 -08004994 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004995 struct statistics_block *stats_blk = bp->stats_blk;
4996 struct net_device_stats *net_stats = &bp->net_stats;
4997
4998 if (bp->stats_blk == NULL) {
4999 return net_stats;
5000 }
5001 net_stats->rx_packets =
5002 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5003 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5004 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5005
5006 net_stats->tx_packets =
5007 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5008 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5009 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5010
5011 net_stats->rx_bytes =
5012 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5013
5014 net_stats->tx_bytes =
5015 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5016
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005017 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005018 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5019
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005020 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005021 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5022
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005023 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005024 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5025 stats_blk->stat_EtherStatsOverrsizePkts);
5026
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005027 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005028 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5029
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005030 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005031 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5032
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005033 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005034 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5035
5036 net_stats->rx_errors = net_stats->rx_length_errors +
5037 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5038 net_stats->rx_crc_errors;
5039
5040 net_stats->tx_aborted_errors =
5041 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5042 stats_blk->stat_Dot3StatsLateCollisions);
5043
Michael Chan5b0c76a2005-11-04 08:45:49 -08005044 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5045 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005046 net_stats->tx_carrier_errors = 0;
5047 else {
5048 net_stats->tx_carrier_errors =
5049 (unsigned long)
5050 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5051 }
5052
5053 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005054 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005055 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5056 +
5057 net_stats->tx_aborted_errors +
5058 net_stats->tx_carrier_errors;
5059
Michael Chancea94db2006-06-12 22:16:13 -07005060 net_stats->rx_missed_errors =
5061 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5062 stats_blk->stat_FwRxDrop);
5063
Michael Chanb6016b72005-05-26 13:03:09 -07005064 return net_stats;
5065}
5066
5067/* All ethtool functions called with rtnl_lock */
5068
5069static int
5070bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5071{
Michael Chan972ec0d2006-01-23 16:12:43 -08005072 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005073
5074 cmd->supported = SUPPORTED_Autoneg;
5075 if (bp->phy_flags & PHY_SERDES_FLAG) {
5076 cmd->supported |= SUPPORTED_1000baseT_Full |
5077 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005078 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5079 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005080
5081 cmd->port = PORT_FIBRE;
5082 }
5083 else {
5084 cmd->supported |= SUPPORTED_10baseT_Half |
5085 SUPPORTED_10baseT_Full |
5086 SUPPORTED_100baseT_Half |
5087 SUPPORTED_100baseT_Full |
5088 SUPPORTED_1000baseT_Full |
5089 SUPPORTED_TP;
5090
5091 cmd->port = PORT_TP;
5092 }
5093
5094 cmd->advertising = bp->advertising;
5095
5096 if (bp->autoneg & AUTONEG_SPEED) {
5097 cmd->autoneg = AUTONEG_ENABLE;
5098 }
5099 else {
5100 cmd->autoneg = AUTONEG_DISABLE;
5101 }
5102
5103 if (netif_carrier_ok(dev)) {
5104 cmd->speed = bp->line_speed;
5105 cmd->duplex = bp->duplex;
5106 }
5107 else {
5108 cmd->speed = -1;
5109 cmd->duplex = -1;
5110 }
5111
5112 cmd->transceiver = XCVR_INTERNAL;
5113 cmd->phy_address = bp->phy_addr;
5114
5115 return 0;
5116}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005117
Michael Chanb6016b72005-05-26 13:03:09 -07005118static int
5119bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5120{
Michael Chan972ec0d2006-01-23 16:12:43 -08005121 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005122 u8 autoneg = bp->autoneg;
5123 u8 req_duplex = bp->req_duplex;
5124 u16 req_line_speed = bp->req_line_speed;
5125 u32 advertising = bp->advertising;
5126
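	/* With autoneg enabled, a single advertised speed is honored where
	 * the PHY supports it; anything else falls back to advertising all
	 * copper or fibre speeds.  Forced-speed settings below are checked
	 * against the PHY type (SerDes ports only allow 1000/2500 full).
	 */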
5127 if (cmd->autoneg == AUTONEG_ENABLE) {
5128 autoneg |= AUTONEG_SPEED;
5129
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005130 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005131
5132 /* allow advertising 1 speed */
5133 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5134 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5135 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5136 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5137
5138 if (bp->phy_flags & PHY_SERDES_FLAG)
5139 return -EINVAL;
5140
5141 advertising = cmd->advertising;
5142
Michael Chan27a005b2007-05-03 13:23:41 -07005143 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5144 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5145 return -EINVAL;
5146 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005147 advertising = cmd->advertising;
5148 }
5149 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5150 return -EINVAL;
5151 }
5152 else {
5153 if (bp->phy_flags & PHY_SERDES_FLAG) {
5154 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5155 }
5156 else {
5157 advertising = ETHTOOL_ALL_COPPER_SPEED;
5158 }
5159 }
5160 advertising |= ADVERTISED_Autoneg;
5161 }
5162 else {
5163 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005164 if ((cmd->speed != SPEED_1000 &&
5165 cmd->speed != SPEED_2500) ||
5166 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005167 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005168
5169 if (cmd->speed == SPEED_2500 &&
5170 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5171 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005172 }
5173 else if (cmd->speed == SPEED_1000) {
5174 return -EINVAL;
5175 }
5176 autoneg &= ~AUTONEG_SPEED;
5177 req_line_speed = cmd->speed;
5178 req_duplex = cmd->duplex;
5179 advertising = 0;
5180 }
5181
5182 bp->autoneg = autoneg;
5183 bp->advertising = advertising;
5184 bp->req_line_speed = req_line_speed;
5185 bp->req_duplex = req_duplex;
5186
Michael Chanc770a652005-08-25 15:38:39 -07005187 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005188
5189 bnx2_setup_phy(bp);
5190
Michael Chanc770a652005-08-25 15:38:39 -07005191 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005192
5193 return 0;
5194}
5195
5196static void
5197bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5198{
Michael Chan972ec0d2006-01-23 16:12:43 -08005199 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005200
5201 strcpy(info->driver, DRV_MODULE_NAME);
5202 strcpy(info->version, DRV_MODULE_VERSION);
5203 strcpy(info->bus_info, pci_name(bp->pdev));
5204 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5205 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5206 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005207 info->fw_version[1] = info->fw_version[3] = '.';
5208 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005209}
5210
Michael Chan244ac4f2006-03-20 17:48:46 -08005211#define BNX2_REGDUMP_LEN (32 * 1024)
5212
5213static int
5214bnx2_get_regs_len(struct net_device *dev)
5215{
5216 return BNX2_REGDUMP_LEN;
5217}
5218
5219static void
5220bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5221{
5222 u32 *p = _p, i, offset;
5223 u8 *orig_p = _p;
5224 struct bnx2 *bp = netdev_priv(dev);
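	/* Consecutive pairs in this table bracket the register ranges that
	 * are safe to read for the dump; offsets falling in the gaps are
	 * left zeroed in the output buffer.
	 */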
5225 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5226 0x0800, 0x0880, 0x0c00, 0x0c10,
5227 0x0c30, 0x0d08, 0x1000, 0x101c,
5228 0x1040, 0x1048, 0x1080, 0x10a4,
5229 0x1400, 0x1490, 0x1498, 0x14f0,
5230 0x1500, 0x155c, 0x1580, 0x15dc,
5231 0x1600, 0x1658, 0x1680, 0x16d8,
5232 0x1800, 0x1820, 0x1840, 0x1854,
5233 0x1880, 0x1894, 0x1900, 0x1984,
5234 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5235 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5236 0x2000, 0x2030, 0x23c0, 0x2400,
5237 0x2800, 0x2820, 0x2830, 0x2850,
5238 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5239 0x3c00, 0x3c94, 0x4000, 0x4010,
5240 0x4080, 0x4090, 0x43c0, 0x4458,
5241 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5242 0x4fc0, 0x5010, 0x53c0, 0x5444,
5243 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5244 0x5fc0, 0x6000, 0x6400, 0x6428,
5245 0x6800, 0x6848, 0x684c, 0x6860,
5246 0x6888, 0x6910, 0x8000 };
5247
5248 regs->version = 0;
5249
5250 memset(p, 0, BNX2_REGDUMP_LEN);
5251
5252 if (!netif_running(bp->dev))
5253 return;
5254
5255 i = 0;
5256 offset = reg_boundaries[0];
5257 p += offset;
5258 while (offset < BNX2_REGDUMP_LEN) {
5259 *p++ = REG_RD(bp, offset);
5260 offset += 4;
5261 if (offset == reg_boundaries[i + 1]) {
5262 offset = reg_boundaries[i + 2];
5263 p = (u32 *) (orig_p + offset);
5264 i += 2;
5265 }
5266 }
5267}
5268
Michael Chanb6016b72005-05-26 13:03:09 -07005269static void
5270bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5271{
Michael Chan972ec0d2006-01-23 16:12:43 -08005272 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005273
5274 if (bp->flags & NO_WOL_FLAG) {
5275 wol->supported = 0;
5276 wol->wolopts = 0;
5277 }
5278 else {
5279 wol->supported = WAKE_MAGIC;
5280 if (bp->wol)
5281 wol->wolopts = WAKE_MAGIC;
5282 else
5283 wol->wolopts = 0;
5284 }
5285 memset(&wol->sopass, 0, sizeof(wol->sopass));
5286}
5287
5288static int
5289bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5290{
Michael Chan972ec0d2006-01-23 16:12:43 -08005291 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005292
5293 if (wol->wolopts & ~WAKE_MAGIC)
5294 return -EINVAL;
5295
5296 if (wol->wolopts & WAKE_MAGIC) {
5297 if (bp->flags & NO_WOL_FLAG)
5298 return -EINVAL;
5299
5300 bp->wol = 1;
5301 }
5302 else {
5303 bp->wol = 0;
5304 }
5305 return 0;
5306}
5307
5308static int
5309bnx2_nway_reset(struct net_device *dev)
5310{
Michael Chan972ec0d2006-01-23 16:12:43 -08005311 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005312 u32 bmcr;
5313
5314 if (!(bp->autoneg & AUTONEG_SPEED)) {
5315 return -EINVAL;
5316 }
5317
Michael Chanc770a652005-08-25 15:38:39 -07005318 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005319
5320	/* Force a link down that is visible to the link partner */
5321 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005322 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005323 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005324
5325 msleep(20);
5326
Michael Chanc770a652005-08-25 15:38:39 -07005327 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005328
5329 bp->current_interval = SERDES_AN_TIMEOUT;
5330 bp->serdes_an_pending = 1;
5331 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005332 }
5333
Michael Chanca58c3a2007-05-03 13:22:52 -07005334 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005335 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005336 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005337
Michael Chanc770a652005-08-25 15:38:39 -07005338 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005339
5340 return 0;
5341}
5342
5343static int
5344bnx2_get_eeprom_len(struct net_device *dev)
5345{
Michael Chan972ec0d2006-01-23 16:12:43 -08005346 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005347
Michael Chan1122db72006-01-23 16:11:42 -08005348 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005349 return 0;
5350
Michael Chan1122db72006-01-23 16:11:42 -08005351 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005352}
5353
5354static int
5355bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5356 u8 *eebuf)
5357{
Michael Chan972ec0d2006-01-23 16:12:43 -08005358 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005359 int rc;
5360
John W. Linville1064e942005-11-10 12:58:24 -08005361 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005362
5363 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5364
5365 return rc;
5366}
5367
5368static int
5369bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5370 u8 *eebuf)
5371{
Michael Chan972ec0d2006-01-23 16:12:43 -08005372 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005373 int rc;
5374
John W. Linville1064e942005-11-10 12:58:24 -08005375 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005376
5377 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5378
5379 return rc;
5380}
5381
5382static int
5383bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5384{
Michael Chan972ec0d2006-01-23 16:12:43 -08005385 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005386
5387 memset(coal, 0, sizeof(struct ethtool_coalesce));
5388
5389 coal->rx_coalesce_usecs = bp->rx_ticks;
5390 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5391 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5392 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5393
5394 coal->tx_coalesce_usecs = bp->tx_ticks;
5395 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5396 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5397 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5398
5399 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5400
5401 return 0;
5402}
5403
5404static int
5405bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5406{
Michael Chan972ec0d2006-01-23 16:12:43 -08005407 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005408
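	/* Clamp each value to the width of its hardware field: tick counts
	 * are 10-bit, frame-count trip points are 8-bit, and the statistics
	 * interval is capped and truncated to a multiple of 256 usecs.
	 */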
5409 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5410 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5411
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005412 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005413 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5414
5415 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5416 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5417
5418 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5419 if (bp->rx_quick_cons_trip_int > 0xff)
5420 bp->rx_quick_cons_trip_int = 0xff;
5421
5422 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5423 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5424
5425 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5426 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5427
5428 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5429 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5430
5431 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5432 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5433 0xff;
5434
5435 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5436 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5437 bp->stats_ticks &= 0xffff00;
5438
5439 if (netif_running(bp->dev)) {
5440 bnx2_netif_stop(bp);
5441 bnx2_init_nic(bp);
5442 bnx2_netif_start(bp);
5443 }
5444
5445 return 0;
5446}
5447
5448static void
5449bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5450{
Michael Chan972ec0d2006-01-23 16:12:43 -08005451 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005452
Michael Chan13daffa2006-03-20 17:49:20 -08005453 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005454 ering->rx_mini_max_pending = 0;
5455 ering->rx_jumbo_max_pending = 0;
5456
5457 ering->rx_pending = bp->rx_ring_size;
5458 ering->rx_mini_pending = 0;
5459 ering->rx_jumbo_pending = 0;
5460
5461 ering->tx_max_pending = MAX_TX_DESC_CNT;
5462 ering->tx_pending = bp->tx_ring_size;
5463}
5464
5465static int
5466bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5467{
Michael Chan972ec0d2006-01-23 16:12:43 -08005468 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005469
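	/* Ring sizes can only be changed with the chip quiesced: reset the
	 * chip and free the old rings, then reallocate and reinitialize if
	 * the interface was running.
	 */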
Michael Chan13daffa2006-03-20 17:49:20 -08005470 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005471 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5472 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5473
5474 return -EINVAL;
5475 }
Michael Chan13daffa2006-03-20 17:49:20 -08005476 if (netif_running(bp->dev)) {
5477 bnx2_netif_stop(bp);
5478 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5479 bnx2_free_skbs(bp);
5480 bnx2_free_mem(bp);
5481 }
5482
5483 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005484 bp->tx_ring_size = ering->tx_pending;
5485
5486 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005487 int rc;
5488
5489 rc = bnx2_alloc_mem(bp);
5490 if (rc)
5491 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005492 bnx2_init_nic(bp);
5493 bnx2_netif_start(bp);
5494 }
5495
5496 return 0;
5497}
5498
5499static void
5500bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5501{
Michael Chan972ec0d2006-01-23 16:12:43 -08005502 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005503
5504 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5505 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5506 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5507}
5508
5509static int
5510bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5511{
Michael Chan972ec0d2006-01-23 16:12:43 -08005512 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005513
5514 bp->req_flow_ctrl = 0;
5515 if (epause->rx_pause)
5516 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5517 if (epause->tx_pause)
5518 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5519
5520 if (epause->autoneg) {
5521 bp->autoneg |= AUTONEG_FLOW_CTRL;
5522 }
5523 else {
5524 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5525 }
5526
Michael Chanc770a652005-08-25 15:38:39 -07005527 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005528
5529 bnx2_setup_phy(bp);
5530
Michael Chanc770a652005-08-25 15:38:39 -07005531 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005532
5533 return 0;
5534}
5535
5536static u32
5537bnx2_get_rx_csum(struct net_device *dev)
5538{
Michael Chan972ec0d2006-01-23 16:12:43 -08005539 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005540
5541 return bp->rx_csum;
5542}
5543
5544static int
5545bnx2_set_rx_csum(struct net_device *dev, u32 data)
5546{
Michael Chan972ec0d2006-01-23 16:12:43 -08005547 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005548
5549 bp->rx_csum = data;
5550 return 0;
5551}
5552
Michael Chanb11d6212006-06-29 12:31:21 -07005553static int
5554bnx2_set_tso(struct net_device *dev, u32 data)
5555{
Michael Chan4666f872007-05-03 13:22:28 -07005556 struct bnx2 *bp = netdev_priv(dev);
5557
5558 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005559 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005560 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5561 dev->features |= NETIF_F_TSO6;
5562 } else
5563 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5564 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005565 return 0;
5566}
5567
Michael Chancea94db2006-06-12 22:16:13 -07005568#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005569
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005570static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005571 char string[ETH_GSTRING_LEN];
5572} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5573 { "rx_bytes" },
5574 { "rx_error_bytes" },
5575 { "tx_bytes" },
5576 { "tx_error_bytes" },
5577 { "rx_ucast_packets" },
5578 { "rx_mcast_packets" },
5579 { "rx_bcast_packets" },
5580 { "tx_ucast_packets" },
5581 { "tx_mcast_packets" },
5582 { "tx_bcast_packets" },
5583 { "tx_mac_errors" },
5584 { "tx_carrier_errors" },
5585 { "rx_crc_errors" },
5586 { "rx_align_errors" },
5587 { "tx_single_collisions" },
5588 { "tx_multi_collisions" },
5589 { "tx_deferred" },
5590 { "tx_excess_collisions" },
5591 { "tx_late_collisions" },
5592 { "tx_total_collisions" },
5593 { "rx_fragments" },
5594 { "rx_jabbers" },
5595 { "rx_undersize_packets" },
5596 { "rx_oversize_packets" },
5597 { "rx_64_byte_packets" },
5598 { "rx_65_to_127_byte_packets" },
5599 { "rx_128_to_255_byte_packets" },
5600 { "rx_256_to_511_byte_packets" },
5601 { "rx_512_to_1023_byte_packets" },
5602 { "rx_1024_to_1522_byte_packets" },
5603 { "rx_1523_to_9022_byte_packets" },
5604 { "tx_64_byte_packets" },
5605 { "tx_65_to_127_byte_packets" },
5606 { "tx_128_to_255_byte_packets" },
5607 { "tx_256_to_511_byte_packets" },
5608 { "tx_512_to_1023_byte_packets" },
5609 { "tx_1024_to_1522_byte_packets" },
5610 { "tx_1523_to_9022_byte_packets" },
5611 { "rx_xon_frames" },
5612 { "rx_xoff_frames" },
5613 { "tx_xon_frames" },
5614 { "tx_xoff_frames" },
5615 { "rx_mac_ctrl_frames" },
5616 { "rx_filtered_packets" },
5617 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005618 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005619};
5620
5621#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5622
Arjan van de Venf71e1302006-03-03 21:33:57 -05005623static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005624 STATS_OFFSET32(stat_IfHCInOctets_hi),
5625 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5626 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5627 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5628 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5629 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5630 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5631 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5632 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5633 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5634 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005635 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5636 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5637 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5638 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5639 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5640 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5641 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5642 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5643 STATS_OFFSET32(stat_EtherStatsCollisions),
5644 STATS_OFFSET32(stat_EtherStatsFragments),
5645 STATS_OFFSET32(stat_EtherStatsJabbers),
5646 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5647 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5648 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5649 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5650 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5651 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5652 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5653 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5654 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5655 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5656 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5657 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5658 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5659 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5660 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5661 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5662 STATS_OFFSET32(stat_XonPauseFramesReceived),
5663 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5664 STATS_OFFSET32(stat_OutXonSent),
5665 STATS_OFFSET32(stat_OutXoffSent),
5666 STATS_OFFSET32(stat_MacControlFramesReceived),
5667 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5668 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005669 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005670};
5671
5672/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5673 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005674 */
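/* Per-counter width for each chip: 8 = 64-bit counter read as a hi/lo
 * pair, 4 = 32-bit counter, 0 = counter skipped on that chip.
 */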
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005675static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005676 8,0,8,8,8,8,8,8,8,8,
5677 4,0,4,4,4,4,4,4,4,4,
5678 4,4,4,4,4,4,4,4,4,4,
5679 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005680 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005681};
5682
Michael Chan5b0c76a2005-11-04 08:45:49 -08005683static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5684 8,0,8,8,8,8,8,8,8,8,
5685 4,4,4,4,4,4,4,4,4,4,
5686 4,4,4,4,4,4,4,4,4,4,
5687 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005688 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005689};
5690
Michael Chanb6016b72005-05-26 13:03:09 -07005691#define BNX2_NUM_TESTS 6
5692
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005693static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005694 char string[ETH_GSTRING_LEN];
5695} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5696 { "register_test (offline)" },
5697 { "memory_test (offline)" },
5698 { "loopback_test (offline)" },
5699 { "nvram_test (online)" },
5700 { "interrupt_test (online)" },
5701 { "link_test (online)" },
5702};
5703
5704static int
5705bnx2_self_test_count(struct net_device *dev)
5706{
5707 return BNX2_NUM_TESTS;
5708}
5709
5710static void
5711bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5712{
Michael Chan972ec0d2006-01-23 16:12:43 -08005713 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005714
5715 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5716 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005717 int i;
5718
Michael Chanb6016b72005-05-26 13:03:09 -07005719 bnx2_netif_stop(bp);
5720 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5721 bnx2_free_skbs(bp);
5722
5723 if (bnx2_test_registers(bp) != 0) {
5724 buf[0] = 1;
5725 etest->flags |= ETH_TEST_FL_FAILED;
5726 }
5727 if (bnx2_test_memory(bp) != 0) {
5728 buf[1] = 1;
5729 etest->flags |= ETH_TEST_FL_FAILED;
5730 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005731 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005732 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005733
5734 if (!netif_running(bp->dev)) {
5735 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5736 }
5737 else {
5738 bnx2_init_nic(bp);
5739 bnx2_netif_start(bp);
5740 }
5741
5742 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005743 for (i = 0; i < 7; i++) {
5744 if (bp->link_up)
5745 break;
5746 msleep_interruptible(1000);
5747 }
Michael Chanb6016b72005-05-26 13:03:09 -07005748 }
5749
5750 if (bnx2_test_nvram(bp) != 0) {
5751 buf[3] = 1;
5752 etest->flags |= ETH_TEST_FL_FAILED;
5753 }
5754 if (bnx2_test_intr(bp) != 0) {
5755 buf[4] = 1;
5756 etest->flags |= ETH_TEST_FL_FAILED;
5757 }
5758
5759 if (bnx2_test_link(bp) != 0) {
5760 buf[5] = 1;
5761 etest->flags |= ETH_TEST_FL_FAILED;
5762
5763 }
5764}
5765
5766static void
5767bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5768{
5769 switch (stringset) {
5770 case ETH_SS_STATS:
5771 memcpy(buf, bnx2_stats_str_arr,
5772 sizeof(bnx2_stats_str_arr));
5773 break;
5774 case ETH_SS_TEST:
5775 memcpy(buf, bnx2_tests_str_arr,
5776 sizeof(bnx2_tests_str_arr));
5777 break;
5778 }
5779}
5780
5781static int
5782bnx2_get_stats_count(struct net_device *dev)
5783{
5784 return BNX2_NUM_STATS;
5785}
5786
5787static void
5788bnx2_get_ethtool_stats(struct net_device *dev,
5789 struct ethtool_stats *stats, u64 *buf)
5790{
Michael Chan972ec0d2006-01-23 16:12:43 -08005791 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005792 int i;
5793 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005794 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005795
5796 if (hw_stats == NULL) {
5797 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5798 return;
5799 }
5800
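	/* Pick the per-chip counter width table, then assemble each value:
	 * 64-bit counters are built from their hi/lo words, 32-bit counters
	 * are copied directly, and skipped counters report zero.
	 */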
Michael Chan5b0c76a2005-11-04 08:45:49 -08005801 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5802 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5803 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5804 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005805 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005806 else
5807 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005808
5809 for (i = 0; i < BNX2_NUM_STATS; i++) {
5810 if (stats_len_arr[i] == 0) {
5811 /* skip this counter */
5812 buf[i] = 0;
5813 continue;
5814 }
5815 if (stats_len_arr[i] == 4) {
5816 /* 4-byte counter */
5817 buf[i] = (u64)
5818 *(hw_stats + bnx2_stats_offset_arr[i]);
5819 continue;
5820 }
5821 /* 8-byte counter */
5822 buf[i] = (((u64) *(hw_stats +
5823 bnx2_stats_offset_arr[i])) << 32) +
5824 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5825 }
5826}
5827
5828static int
5829bnx2_phys_id(struct net_device *dev, u32 data)
5830{
Michael Chan972ec0d2006-01-23 16:12:43 -08005831 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005832 int i;
5833 u32 save;
5834
5835 if (data == 0)
5836 data = 2;
5837
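	/* Blink the port LED under software override for the requested
	 * number of seconds (500 ms on / 500 ms off), then restore the
	 * original LED mode.
	 */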
5838 save = REG_RD(bp, BNX2_MISC_CFG);
5839 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5840
5841 for (i = 0; i < (data * 2); i++) {
5842 if ((i % 2) == 0) {
5843 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5844 }
5845 else {
5846 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5847 BNX2_EMAC_LED_1000MB_OVERRIDE |
5848 BNX2_EMAC_LED_100MB_OVERRIDE |
5849 BNX2_EMAC_LED_10MB_OVERRIDE |
5850 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5851 BNX2_EMAC_LED_TRAFFIC);
5852 }
5853 msleep_interruptible(500);
5854 if (signal_pending(current))
5855 break;
5856 }
5857 REG_WR(bp, BNX2_EMAC_LED, 0);
5858 REG_WR(bp, BNX2_MISC_CFG, save);
5859 return 0;
5860}
5861
Michael Chan4666f872007-05-03 13:22:28 -07005862static int
5863bnx2_set_tx_csum(struct net_device *dev, u32 data)
5864{
5865 struct bnx2 *bp = netdev_priv(dev);
5866
5867 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5868 return (ethtool_op_set_tx_hw_csum(dev, data));
5869 else
5870 return (ethtool_op_set_tx_csum(dev, data));
5871}
5872
Jeff Garzik7282d492006-09-13 14:30:00 -04005873static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005874 .get_settings = bnx2_get_settings,
5875 .set_settings = bnx2_set_settings,
5876 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005877 .get_regs_len = bnx2_get_regs_len,
5878 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005879 .get_wol = bnx2_get_wol,
5880 .set_wol = bnx2_set_wol,
5881 .nway_reset = bnx2_nway_reset,
5882 .get_link = ethtool_op_get_link,
5883 .get_eeprom_len = bnx2_get_eeprom_len,
5884 .get_eeprom = bnx2_get_eeprom,
5885 .set_eeprom = bnx2_set_eeprom,
5886 .get_coalesce = bnx2_get_coalesce,
5887 .set_coalesce = bnx2_set_coalesce,
5888 .get_ringparam = bnx2_get_ringparam,
5889 .set_ringparam = bnx2_set_ringparam,
5890 .get_pauseparam = bnx2_get_pauseparam,
5891 .set_pauseparam = bnx2_set_pauseparam,
5892 .get_rx_csum = bnx2_get_rx_csum,
5893 .set_rx_csum = bnx2_set_rx_csum,
5894 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005895 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005896 .get_sg = ethtool_op_get_sg,
5897 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005898 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005899 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005900 .self_test_count = bnx2_self_test_count,
5901 .self_test = bnx2_self_test,
5902 .get_strings = bnx2_get_strings,
5903 .phys_id = bnx2_phys_id,
5904 .get_stats_count = bnx2_get_stats_count,
5905 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005906 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005907};
5908
5909/* Called with rtnl_lock */
5910static int
5911bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5912{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005913 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005914 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005915 int err;
5916
5917 switch(cmd) {
5918 case SIOCGMIIPHY:
5919 data->phy_id = bp->phy_addr;
5920
5921 /* fallthru */
5922 case SIOCGMIIREG: {
5923 u32 mii_regval;
5924
Michael Chandad3e452007-05-03 13:18:03 -07005925 if (!netif_running(dev))
5926 return -EAGAIN;
5927
Michael Chanc770a652005-08-25 15:38:39 -07005928 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005929 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005930 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005931
5932 data->val_out = mii_regval;
5933
5934 return err;
5935 }
5936
5937 case SIOCSMIIREG:
5938 if (!capable(CAP_NET_ADMIN))
5939 return -EPERM;
5940
Michael Chandad3e452007-05-03 13:18:03 -07005941 if (!netif_running(dev))
5942 return -EAGAIN;
5943
Michael Chanc770a652005-08-25 15:38:39 -07005944 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005945 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005946 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005947
5948 return err;
5949
5950 default:
5951 /* do nothing */
5952 break;
5953 }
5954 return -EOPNOTSUPP;
5955}
5956
5957/* Called with rtnl_lock */
5958static int
5959bnx2_change_mac_addr(struct net_device *dev, void *p)
5960{
5961 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005962 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005963
Michael Chan73eef4c2005-08-25 15:39:15 -07005964 if (!is_valid_ether_addr(addr->sa_data))
5965 return -EINVAL;
5966
Michael Chanb6016b72005-05-26 13:03:09 -07005967 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5968 if (netif_running(dev))
5969 bnx2_set_mac_addr(bp);
5970
5971 return 0;
5972}
5973
5974/* Called with rtnl_lock */
5975static int
5976bnx2_change_mtu(struct net_device *dev, int new_mtu)
5977{
Michael Chan972ec0d2006-01-23 16:12:43 -08005978 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005979
5980 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5981 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5982 return -EINVAL;
5983
5984 dev->mtu = new_mtu;
5985 if (netif_running(dev)) {
5986 bnx2_netif_stop(bp);
5987
5988 bnx2_init_nic(bp);
5989
5990 bnx2_netif_start(bp);
5991 }
5992 return 0;
5993}
5994
5995#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5996static void
5997poll_bnx2(struct net_device *dev)
5998{
Michael Chan972ec0d2006-01-23 16:12:43 -08005999 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006000
6001 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006002 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006003 enable_irq(bp->pdev->irq);
6004}
6005#endif
6006
Michael Chan253c8b72007-01-08 19:56:01 -08006007static void __devinit
6008bnx2_get_5709_media(struct bnx2 *bp)
6009{
6010 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6011 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6012 u32 strap;
6013
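	/* The 5709 can be bonded as either copper or SerDes.  Decide from
	 * the bond ID when it is explicit, otherwise decode the strap
	 * value, which is interpreted per PCI function.
	 */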
6014 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6015 return;
6016 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6017 bp->phy_flags |= PHY_SERDES_FLAG;
6018 return;
6019 }
6020
6021 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6022 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6023 else
6024 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6025
6026 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6027 switch (strap) {
6028 case 0x4:
6029 case 0x5:
6030 case 0x6:
6031 bp->phy_flags |= PHY_SERDES_FLAG;
6032 return;
6033 }
6034 } else {
6035 switch (strap) {
6036 case 0x1:
6037 case 0x2:
6038 case 0x4:
6039 bp->phy_flags |= PHY_SERDES_FLAG;
6040 return;
6041 }
6042 }
6043}
6044
Michael Chan883e5152007-05-03 13:25:11 -07006045static void __devinit
6046bnx2_get_pci_speed(struct bnx2 *bp)
6047{
6048 u32 reg;
6049
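	/* Derive the bus type and clock from the PCICFG misc status and
	 * clock control registers: PCI-X mode selects among the listed
	 * frequencies, plain PCI is 66 or 33 MHz based on M66EN.
	 */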
6050 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6051 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6052 u32 clkreg;
6053
6054 bp->flags |= PCIX_FLAG;
6055
6056 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6057
6058 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6059 switch (clkreg) {
6060 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6061 bp->bus_speed_mhz = 133;
6062 break;
6063
6064 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6065 bp->bus_speed_mhz = 100;
6066 break;
6067
6068 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6069 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6070 bp->bus_speed_mhz = 66;
6071 break;
6072
6073 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6074 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6075 bp->bus_speed_mhz = 50;
6076 break;
6077
6078 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6079 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6080 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6081 bp->bus_speed_mhz = 33;
6082 break;
6083 }
6084 }
6085 else {
6086 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6087 bp->bus_speed_mhz = 66;
6088 else
6089 bp->bus_speed_mhz = 33;
6090 }
6091
6092 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6093 bp->flags |= PCI_32BIT_FLAG;
6094
6095}
6096
Michael Chanb6016b72005-05-26 13:03:09 -07006097static int __devinit
6098bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6099{
6100 struct bnx2 *bp;
6101 unsigned long mem_len;
6102 int rc;
6103 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006104 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006105
6106 SET_MODULE_OWNER(dev);
6107 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006108 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006109
6110 bp->flags = 0;
6111 bp->phy_flags = 0;
6112
6113 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6114 rc = pci_enable_device(pdev);
6115 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006116		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006117 goto err_out;
6118 }
6119
6120 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006121 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006122 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006123 rc = -ENODEV;
6124 goto err_out_disable;
6125 }
6126
6127 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6128 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006129 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006130 goto err_out_disable;
6131 }
6132
6133 pci_set_master(pdev);
6134
6135 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6136 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006137 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006138 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006139 rc = -EIO;
6140 goto err_out_release;
6141 }
6142
Michael Chanb6016b72005-05-26 13:03:09 -07006143 bp->dev = dev;
6144 bp->pdev = pdev;
6145
6146 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006147 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006148 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006149
6150 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006151 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006152 dev->mem_end = dev->mem_start + mem_len;
6153 dev->irq = pdev->irq;
6154
6155 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6156
6157 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006158 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006159 rc = -ENOMEM;
6160 goto err_out_release;
6161 }
6162
6163 /* Configure byte swap and enable write to the reg_window registers.
6164	 * Rely on the CPU to do target byte swapping on big endian systems;
6165	 * the chip's target access swapping will not swap all accesses.
6166 */
6167 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6168 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6169 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6170
Pavel Machek829ca9a2005-09-03 15:56:56 -07006171 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006172
6173 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6174
Michael Chan883e5152007-05-03 13:25:11 -07006175 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6176 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6177 dev_err(&pdev->dev,
6178 "Cannot find PCIE capability, aborting.\n");
6179 rc = -EIO;
6180 goto err_out_unmap;
6181 }
6182 bp->flags |= PCIE_FLAG;
6183 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006184 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6185 if (bp->pcix_cap == 0) {
6186 dev_err(&pdev->dev,
6187 "Cannot find PCIX capability, aborting.\n");
6188 rc = -EIO;
6189 goto err_out_unmap;
6190 }
6191 }
6192
Michael Chan8e6a72c2007-05-03 13:24:48 -07006193 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6194 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6195 bp->flags |= MSI_CAP_FLAG;
6196 }
6197
Michael Chan40453c82007-05-03 13:19:18 -07006198 /* 5708 cannot support DMA addresses > 40-bit. */
6199 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6200 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6201 else
6202 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6203
6204 /* Configure DMA attributes. */
6205 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6206 dev->features |= NETIF_F_HIGHDMA;
6207 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6208 if (rc) {
6209 dev_err(&pdev->dev,
6210 "pci_set_consistent_dma_mask failed, aborting.\n");
6211 goto err_out_unmap;
6212 }
6213 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6214 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6215 goto err_out_unmap;
6216 }
6217
Michael Chan883e5152007-05-03 13:25:11 -07006218 if (!(bp->flags & PCIE_FLAG))
6219 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006220
6221 /* 5706A0 may falsely detect SERR and PERR. */
6222 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6223 reg = REG_RD(bp, PCI_COMMAND);
6224 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6225 REG_WR(bp, PCI_COMMAND, reg);
6226 }
6227 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6228 !(bp->flags & PCIX_FLAG)) {
6229
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006230 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006231 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006232 goto err_out_unmap;
6233 }
6234
6235 bnx2_init_nvram(bp);
6236
Michael Chane3648b32005-11-04 08:51:21 -08006237 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6238
6239 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006240 BNX2_SHM_HDR_SIGNATURE_SIG) {
6241 u32 off = PCI_FUNC(pdev->devfn) << 2;
6242
6243 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6244 } else
Michael Chane3648b32005-11-04 08:51:21 -08006245 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6246
Michael Chanb6016b72005-05-26 13:03:09 -07006247 /* Get the permanent MAC address. First we need to make sure the
6248 * firmware is actually running.
6249 */
Michael Chane3648b32005-11-04 08:51:21 -08006250 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006251
6252 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6253 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006254 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006255 rc = -ENODEV;
6256 goto err_out_unmap;
6257 }
6258
Michael Chane3648b32005-11-04 08:51:21 -08006259 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006260
Michael Chane3648b32005-11-04 08:51:21 -08006261 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006262 bp->mac_addr[0] = (u8) (reg >> 8);
6263 bp->mac_addr[1] = (u8) reg;
6264
Michael Chane3648b32005-11-04 08:51:21 -08006265 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006266 bp->mac_addr[2] = (u8) (reg >> 24);
6267 bp->mac_addr[3] = (u8) (reg >> 16);
6268 bp->mac_addr[4] = (u8) (reg >> 8);
6269 bp->mac_addr[5] = (u8) reg;
6270
6271 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006272 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006273
6274 bp->rx_csum = 1;
6275
6276 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6277
6278 bp->tx_quick_cons_trip_int = 20;
6279 bp->tx_quick_cons_trip = 20;
6280 bp->tx_ticks_int = 80;
6281 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006282
Michael Chanb6016b72005-05-26 13:03:09 -07006283 bp->rx_quick_cons_trip_int = 6;
6284 bp->rx_quick_cons_trip = 6;
6285 bp->rx_ticks_int = 18;
6286 bp->rx_ticks = 18;
6287
6288 bp->stats_ticks = 1000000 & 0xffff00;
6289
6290 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006291 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006292
Michael Chan5b0c76a2005-11-04 08:45:49 -08006293 bp->phy_addr = 1;
6294
Michael Chanb6016b72005-05-26 13:03:09 -07006295 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006296 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6297 bnx2_get_5709_media(bp);
6298 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006299 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006300
6301 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006302 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006303 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006304 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006305 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006306 BNX2_SHARED_HW_CFG_CONFIG);
6307 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6308 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6309 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006310 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6311 CHIP_NUM(bp) == CHIP_NUM_5708)
6312 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006313 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6314 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006315
Michael Chan16088272006-06-12 22:16:43 -07006316 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6317 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6318 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006319 bp->flags |= NO_WOL_FLAG;
6320
Michael Chanb6016b72005-05-26 13:03:09 -07006321 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6322 bp->tx_quick_cons_trip_int =
6323 bp->tx_quick_cons_trip;
6324 bp->tx_ticks_int = bp->tx_ticks;
6325 bp->rx_quick_cons_trip_int =
6326 bp->rx_quick_cons_trip;
6327 bp->rx_ticks_int = bp->rx_ticks;
6328 bp->comp_prod_trip_int = bp->comp_prod_trip;
6329 bp->com_ticks_int = bp->com_ticks;
6330 bp->cmd_ticks_int = bp->cmd_ticks;
6331 }
6332
Michael Chanf9317a42006-09-29 17:06:23 -07006333 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6334 *
6335 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6336 * with byte enables disabled on the unused 32-bit word. This is legal
6337 * but causes problems on the AMD 8132 which will eventually stop
6338 * responding after a while.
6339 *
6340 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006341 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006342 */
6343 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6344 struct pci_dev *amd_8132 = NULL;
6345
6346 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6347 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6348 amd_8132))) {
6349 u8 rev;
6350
6351 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6352 if (rev >= 0x10 && rev <= 0x13) {
6353 disable_msi = 1;
6354 pci_dev_put(amd_8132);
6355 break;
6356 }
6357 }
6358 }
6359
Michael Chanb6016b72005-05-26 13:03:09 -07006360 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6361 bp->req_line_speed = 0;
6362 if (bp->phy_flags & PHY_SERDES_FLAG) {
6363 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006364
Michael Chane3648b32005-11-04 08:51:21 -08006365 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006366 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6367 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6368 bp->autoneg = 0;
6369 bp->req_line_speed = bp->line_speed = SPEED_1000;
6370 bp->req_duplex = DUPLEX_FULL;
6371 }
Michael Chanb6016b72005-05-26 13:03:09 -07006372 }
6373 else {
6374 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6375 }
6376
6377 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6378
Michael Chancd339a02005-08-25 15:35:24 -07006379 init_timer(&bp->timer);
6380 bp->timer.expires = RUN_AT(bp->timer_interval);
6381 bp->timer.data = (unsigned long) bp;
6382 bp->timer.function = bnx2_timer;
6383
Michael Chanb6016b72005-05-26 13:03:09 -07006384 return 0;
6385
6386err_out_unmap:
6387 if (bp->regview) {
6388 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006389 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006390 }
6391
6392err_out_release:
6393 pci_release_regions(pdev);
6394
6395err_out_disable:
6396 pci_disable_device(pdev);
6397 pci_set_drvdata(pdev, NULL);
6398
6399err_out:
6400 return rc;
6401}
6402
Michael Chan883e5152007-05-03 13:25:11 -07006403static char * __devinit
6404bnx2_bus_string(struct bnx2 *bp, char *str)
6405{
6406 char *s = str;
6407
6408 if (bp->flags & PCIE_FLAG) {
6409 s += sprintf(s, "PCI Express");
6410 } else {
6411 s += sprintf(s, "PCI");
6412 if (bp->flags & PCIX_FLAG)
6413 s += sprintf(s, "-X");
6414 if (bp->flags & PCI_32BIT_FLAG)
6415 s += sprintf(s, " 32-bit");
6416 else
6417 s += sprintf(s, " 64-bit");
6418 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6419 }
6420 return str;
6421}
6422
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}

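/* PCI remove callback: tear down in roughly the reverse order of
 * probe -- flush any pending deferred work, unregister the
 * net_device, unmap the register window, and release the PCI
 * resources.
 */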
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

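/* Suspend: quiesce the NIC, pick the firmware reset code that matches
 * the Wake-on-LAN configuration, reset the chip, free the posted
 * skbs, save PCI config space, and drop the device into the requested
 * low-power state.
 */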
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

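/* Resume: restore PCI config space, bring the chip back to D0,
 * reinitialize the NIC, and restart the transmit/receive paths.
 */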
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

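/* PCI driver glue tying the probe/remove and power management
 * callbacks above to the device IDs listed in bnx2_pci_tbl.
 */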
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

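/* Module entry/exit points simply register and unregister the PCI
 * driver; all per-device setup happens in bnx2_init_one().
 */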
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);