/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x8000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.9"
#define DRV_MODULE_RELDATE	"December 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

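/* Return the number of transmit descriptors currently free in the TX ring. */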
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

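/* Indirect register accessors: go through the PCICFG register window,
 * serialized by indirect_lock.
 */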
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

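/* Write a value into the chip's context memory for the given context ID. */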
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

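/* Read and write PHY registers over the MDIO interface, temporarily
 * disabling hardware auto-polling while the access is in progress.
 */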
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

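/* Mask and unmask chip interrupts through the PCICFG interrupt
 * acknowledge command register.
 */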
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

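/* Quiesce the interface: disable interrupts, stop NAPI polling and the
 * TX queue.  bnx2_netif_start() reverses this once intr_sem drops to zero.
 */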
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

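/* Allocate the TX/RX descriptor rings, the combined status/statistics
 * block and, on 5709 chips, the context memory pages.
 */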
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

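/* Report the current link speed, duplex and autoneg state to the bootcode
 * through shared memory.
 */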
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

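/* Resolve TX/RX flow control from the local and remote pause advertisements
 * after autonegotiation, or from the forced settings otherwise.
 */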
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

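/* Program the EMAC for the current link speed, duplex and flow control. */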
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

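/* Update the driver's link state from the PHY status registers and
 * reprogram the MAC to match.
 */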
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

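/* Translate the requested flow control settings into PHY pause
 * advertisement bits.
 */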
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

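/* Configure the SerDes PHY for either autonegotiation or a forced
 * speed/duplex setting.
 */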
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

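/* Handle a link event reported by the remote PHY through shared memory
 * and update the local link state accordingly.
 */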
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

Michael Chanb6016b72005-05-26 13:03:09 -07001605static int
1606bnx2_setup_copper_phy(struct bnx2 *bp)
1607{
1608 u32 bmcr;
1609 u32 new_bmcr;
1610
Michael Chanca58c3a2007-05-03 13:22:52 -07001611 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001612
1613 if (bp->autoneg & AUTONEG_SPEED) {
1614 u32 adv_reg, adv1000_reg;
1615 u32 new_adv_reg = 0;
1616 u32 new_adv1000_reg = 0;
1617
Michael Chanca58c3a2007-05-03 13:22:52 -07001618 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001619 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1620 ADVERTISE_PAUSE_ASYM);
1621
1622 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1623 adv1000_reg &= PHY_ALL_1000_SPEED;
1624
1625 if (bp->advertising & ADVERTISED_10baseT_Half)
1626 new_adv_reg |= ADVERTISE_10HALF;
1627 if (bp->advertising & ADVERTISED_10baseT_Full)
1628 new_adv_reg |= ADVERTISE_10FULL;
1629 if (bp->advertising & ADVERTISED_100baseT_Half)
1630 new_adv_reg |= ADVERTISE_100HALF;
1631 if (bp->advertising & ADVERTISED_100baseT_Full)
1632 new_adv_reg |= ADVERTISE_100FULL;
1633 if (bp->advertising & ADVERTISED_1000baseT_Full)
1634 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001635
Michael Chanb6016b72005-05-26 13:03:09 -07001636 new_adv_reg |= ADVERTISE_CSMA;
1637
1638 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1639
1640 if ((adv1000_reg != new_adv1000_reg) ||
1641 (adv_reg != new_adv_reg) ||
1642 ((bmcr & BMCR_ANENABLE) == 0)) {
1643
Michael Chanca58c3a2007-05-03 13:22:52 -07001644 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001645 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001646 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001647 BMCR_ANENABLE);
1648 }
1649 else if (bp->link_up) {
1650 /* Flow ctrl may have changed from auto to forced */
1651 /* or vice-versa. */
1652
1653 bnx2_resolve_flow_ctrl(bp);
1654 bnx2_set_mac_link(bp);
1655 }
1656 return 0;
1657 }
1658
1659 new_bmcr = 0;
1660 if (bp->req_line_speed == SPEED_100) {
1661 new_bmcr |= BMCR_SPEED100;
1662 }
1663 if (bp->req_duplex == DUPLEX_FULL) {
1664 new_bmcr |= BMCR_FULLDPLX;
1665 }
1666 if (new_bmcr != bmcr) {
1667 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001668
Michael Chanca58c3a2007-05-03 13:22:52 -07001669 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1670 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001671
Michael Chanb6016b72005-05-26 13:03:09 -07001672 if (bmsr & BMSR_LSTATUS) {
1673 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001674 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001675 spin_unlock_bh(&bp->phy_lock);
1676 msleep(50);
1677 spin_lock_bh(&bp->phy_lock);
1678
Michael Chanca58c3a2007-05-03 13:22:52 -07001679 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001681 }
1682
Michael Chanca58c3a2007-05-03 13:22:52 -07001683 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001684
1685		/* Normally, the new speed is set up after the link has
1686		 * gone down and come back up. In some cases, the link will
1687		 * not go down, so we need to set up the new speed here.
1688 */
1689 if (bmsr & BMSR_LSTATUS) {
1690 bp->line_speed = bp->req_line_speed;
1691 bp->duplex = bp->req_duplex;
1692 bnx2_resolve_flow_ctrl(bp);
1693 bnx2_set_mac_link(bp);
1694 }
Michael Chan27a005b2007-05-03 13:23:41 -07001695 } else {
1696 bnx2_resolve_flow_ctrl(bp);
1697 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001698 }
1699 return 0;
1700}
1701
1702static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001703bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001704{
1705 if (bp->loopback == MAC_LOOPBACK)
1706 return 0;
1707
1708 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001709 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001710 }
1711 else {
1712 return (bnx2_setup_copper_phy(bp));
1713 }
1714}
1715
1716static int
Michael Chan27a005b2007-05-03 13:23:41 -07001717bnx2_init_5709s_phy(struct bnx2 *bp)
1718{
1719 u32 val;
1720
1721 bp->mii_bmcr = MII_BMCR + 0x10;
1722 bp->mii_bmsr = MII_BMSR + 0x10;
1723 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1724 bp->mii_adv = MII_ADVERTISE + 0x10;
1725 bp->mii_lpa = MII_LPA + 0x10;
1726 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1727
1728 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1729 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1730
1731 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1732 bnx2_reset_phy(bp);
1733
1734 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1735
1736 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1737 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1738 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1739 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1740
1741 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1742 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1743 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1744 val |= BCM5708S_UP1_2G5;
1745 else
1746 val &= ~BCM5708S_UP1_2G5;
1747 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1748
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1750 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1751 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1752 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1753
1754 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1755
1756 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1757 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1758 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1759
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761
1762 return 0;
1763}
1764
1765static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001766bnx2_init_5708s_phy(struct bnx2 *bp)
1767{
1768 u32 val;
1769
Michael Chan27a005b2007-05-03 13:23:41 -07001770 bnx2_reset_phy(bp);
1771
1772 bp->mii_up1 = BCM5708S_UP1;
1773
Michael Chan5b0c76a2005-11-04 08:45:49 -08001774 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1775 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1777
1778 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1779 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1780 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1781
1782 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1783 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1784 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1785
1786 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1787 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1788 val |= BCM5708S_UP1_2G5;
1789 bnx2_write_phy(bp, BCM5708S_UP1, val);
1790 }
1791
1792 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001793 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1794 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001795 /* increase tx signal amplitude */
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1797 BCM5708S_BLK_ADDR_TX_MISC);
1798 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1799 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1800 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1801 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1802 }
1803
Michael Chane3648b32005-11-04 08:51:21 -08001804 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001805 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1806
1807 if (val) {
1808 u32 is_backplane;
1809
Michael Chane3648b32005-11-04 08:51:21 -08001810 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001811 BNX2_SHARED_HW_CFG_CONFIG);
1812 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1813 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1814 BCM5708S_BLK_ADDR_TX_MISC);
1815 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1816 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1817 BCM5708S_BLK_ADDR_DIG);
1818 }
1819 }
1820 return 0;
1821}
1822
1823static int
1824bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001825{
Michael Chan27a005b2007-05-03 13:23:41 -07001826 bnx2_reset_phy(bp);
1827
Michael Chanb6016b72005-05-26 13:03:09 -07001828 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1829
Michael Chan59b47d82006-11-19 14:10:45 -08001830 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1831 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001832
1833 if (bp->dev->mtu > 1500) {
1834 u32 val;
1835
1836 /* Set extended packet length bit */
1837 bnx2_write_phy(bp, 0x18, 0x7);
1838 bnx2_read_phy(bp, 0x18, &val);
1839 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1840
1841 bnx2_write_phy(bp, 0x1c, 0x6c00);
1842 bnx2_read_phy(bp, 0x1c, &val);
1843 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1844 }
1845 else {
1846 u32 val;
1847
1848 bnx2_write_phy(bp, 0x18, 0x7);
1849 bnx2_read_phy(bp, 0x18, &val);
1850 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1851
1852 bnx2_write_phy(bp, 0x1c, 0x6c00);
1853 bnx2_read_phy(bp, 0x1c, &val);
1854 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1855 }
1856
1857 return 0;
1858}
1859
1860static int
1861bnx2_init_copper_phy(struct bnx2 *bp)
1862{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001863 u32 val;
1864
Michael Chan27a005b2007-05-03 13:23:41 -07001865 bnx2_reset_phy(bp);
1866
Michael Chanb6016b72005-05-26 13:03:09 -07001867 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1868 bnx2_write_phy(bp, 0x18, 0x0c00);
1869 bnx2_write_phy(bp, 0x17, 0x000a);
1870 bnx2_write_phy(bp, 0x15, 0x310b);
1871 bnx2_write_phy(bp, 0x17, 0x201f);
1872 bnx2_write_phy(bp, 0x15, 0x9506);
1873 bnx2_write_phy(bp, 0x17, 0x401f);
1874 bnx2_write_phy(bp, 0x15, 0x14e2);
1875 bnx2_write_phy(bp, 0x18, 0x0400);
1876 }
1877
Michael Chanb659f442007-02-02 00:46:35 -08001878 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1879 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1880 MII_BNX2_DSP_EXPAND_REG | 0x8);
1881 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1882 val &= ~(1 << 8);
1883 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1884 }
1885
Michael Chanb6016b72005-05-26 13:03:09 -07001886 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001887 /* Set extended packet length bit */
1888 bnx2_write_phy(bp, 0x18, 0x7);
1889 bnx2_read_phy(bp, 0x18, &val);
1890 bnx2_write_phy(bp, 0x18, val | 0x4000);
1891
1892 bnx2_read_phy(bp, 0x10, &val);
1893 bnx2_write_phy(bp, 0x10, val | 0x1);
1894 }
1895 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1899
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val & ~0x1);
1902 }
1903
Michael Chan5b0c76a2005-11-04 08:45:49 -08001904 /* ethernet@wirespeed */
1905 bnx2_write_phy(bp, 0x18, 0x7007);
1906 bnx2_read_phy(bp, 0x18, &val);
1907 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001908 return 0;
1909}
1910
1911
1912static int
1913bnx2_init_phy(struct bnx2 *bp)
1914{
1915 u32 val;
1916 int rc = 0;
1917
1918 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1919 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1920
Michael Chanca58c3a2007-05-03 13:22:52 -07001921 bp->mii_bmcr = MII_BMCR;
1922 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001923 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001924 bp->mii_adv = MII_ADVERTISE;
1925 bp->mii_lpa = MII_LPA;
1926
Michael Chanb6016b72005-05-26 13:03:09 -07001927 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1928
Michael Chan0d8a6572007-07-07 22:49:43 -07001929 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1930 goto setup_phy;
1931
Michael Chanb6016b72005-05-26 13:03:09 -07001932 bnx2_read_phy(bp, MII_PHYSID1, &val);
1933 bp->phy_id = val << 16;
1934 bnx2_read_phy(bp, MII_PHYSID2, &val);
1935 bp->phy_id |= val & 0xffff;
1936
1937 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001938 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1939 rc = bnx2_init_5706s_phy(bp);
1940 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1941 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001942 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1943 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001944 }
1945 else {
1946 rc = bnx2_init_copper_phy(bp);
1947 }
1948
Michael Chan0d8a6572007-07-07 22:49:43 -07001949setup_phy:
1950 if (!rc)
1951 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001952
1953 return rc;
1954}
1955
1956static int
1957bnx2_set_mac_loopback(struct bnx2 *bp)
1958{
1959 u32 mac_mode;
1960
1961 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965 bp->link_up = 1;
1966 return 0;
1967}
1968
Michael Chanbc5a0692006-01-23 16:13:22 -08001969static int bnx2_test_link(struct bnx2 *);
1970
1971static int
1972bnx2_set_phy_loopback(struct bnx2 *bp)
1973{
1974 u32 mac_mode;
1975 int rc, i;
1976
1977 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001978 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001979 BMCR_SPEED1000);
1980 spin_unlock_bh(&bp->phy_lock);
1981 if (rc)
1982 return rc;
1983
1984 for (i = 0; i < 10; i++) {
1985 if (bnx2_test_link(bp) == 0)
1986 break;
Michael Chan80be4432006-11-19 14:07:28 -08001987 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001988 }
1989
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1992 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001993 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001994
1995 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1996 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1997 bp->link_up = 1;
1998 return 0;
1999}
2000
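/* Post a request to the firmware through the BNX2_DRV_MB shared memory
 * mailbox and poll BNX2_FW_MB for the matching acknowledgement.  The
 * sequence number carried in msg_data pairs each request with its ack;
 * if the ack times out, the firmware is told so with a FW_TIMEOUT message.
 */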
Michael Chanb6016b72005-05-26 13:03:09 -07002001static int
Michael Chanb090ae22006-01-23 16:07:10 -08002002bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07002003{
2004 int i;
2005 u32 val;
2006
Michael Chanb6016b72005-05-26 13:03:09 -07002007 bp->fw_wr_seq++;
2008 msg_data |= bp->fw_wr_seq;
2009
Michael Chane3648b32005-11-04 08:51:21 -08002010 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002011
2012 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08002013 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2014 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07002015
Michael Chane3648b32005-11-04 08:51:21 -08002016 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07002017
2018 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2019 break;
2020 }
Michael Chanb090ae22006-01-23 16:07:10 -08002021 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2022 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002023
2024 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08002025 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2026 if (!silent)
2027 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2028 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002029
2030 msg_data &= ~BNX2_DRV_MSG_CODE;
2031 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2032
Michael Chane3648b32005-11-04 08:51:21 -08002033 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002034
Michael Chanb6016b72005-05-26 13:03:09 -07002035 return -EBUSY;
2036 }
2037
Michael Chanb090ae22006-01-23 16:07:10 -08002038 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2039 return -EIO;
2040
Michael Chanb6016b72005-05-26 13:03:09 -07002041 return 0;
2042}
2043
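/* The 5709 keeps its context memory in host pages.  Enable the context
 * block, wait for its internal memory init to finish, then program the
 * host page table with the DMA address of each context page, polling
 * until each page table write request is accepted.
 */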
Michael Chan59b47d82006-11-19 14:10:45 -08002044static int
2045bnx2_init_5709_context(struct bnx2 *bp)
2046{
2047 int i, ret = 0;
2048 u32 val;
2049
2050 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2051 val |= (BCM_PAGE_BITS - 8) << 16;
2052 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002053 for (i = 0; i < 10; i++) {
2054 val = REG_RD(bp, BNX2_CTX_COMMAND);
2055 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2056 break;
2057 udelay(2);
2058 }
2059 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2060 return -EBUSY;
2061
Michael Chan59b47d82006-11-19 14:10:45 -08002062 for (i = 0; i < bp->ctx_pages; i++) {
2063 int j;
2064
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2066 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2067 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2068 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2069 (u64) bp->ctx_blk_mapping[i] >> 32);
2070 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2071 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2072 for (j = 0; j < 10; j++) {
2073
2074 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2075 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2076 break;
2077 udelay(5);
2078 }
2079 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2080 ret = -EBUSY;
2081 break;
2082 }
2083 }
2084 return ret;
2085}
2086
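/* Zero out the on-chip context memory for all 96 contexts.  On the
 * 5706 A0, some virtual context IDs map to different physical context
 * IDs; later chips use a 1:1 mapping.
 */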
Michael Chanb6016b72005-05-26 13:03:09 -07002087static void
2088bnx2_init_context(struct bnx2 *bp)
2089{
2090 u32 vcid;
2091
2092 vcid = 96;
2093 while (vcid) {
2094 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002095 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002096
2097 vcid--;
2098
2099 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2100 u32 new_vcid;
2101
2102 vcid_addr = GET_PCID_ADDR(vcid);
2103 if (vcid & 0x8) {
2104 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2105 }
2106 else {
2107 new_vcid = vcid;
2108 }
2109 pcid_addr = GET_PCID_ADDR(new_vcid);
2110 }
2111 else {
2112 vcid_addr = GET_CID_ADDR(vcid);
2113 pcid_addr = vcid_addr;
2114 }
2115
Michael Chan7947b202007-06-04 21:17:10 -07002116 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2117 vcid_addr += (i << PHY_CTX_SHIFT);
2118 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002119
Michael Chan7947b202007-06-04 21:17:10 -07002120 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2121 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2122
2123 /* Zero out the context. */
2124 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2125 CTX_WR(bp, 0x00, offset, 0);
2126
2127 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2128 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
Michael Chanb6016b72005-05-26 13:03:09 -07002129 }
Michael Chanb6016b72005-05-26 13:03:09 -07002130 }
2131}
2132
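/* Work around bad on-chip RX buffer memory: allocate every mbuf
 * cluster from the chip's free pool, remember the good ones (bit 9
 * clear), and free only those back.  The bad clusters remain
 * allocated so the hardware never hands them out again.
 */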
2133static int
2134bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2135{
2136 u16 *good_mbuf;
2137 u32 good_mbuf_cnt;
2138 u32 val;
2139
2140 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2141 if (good_mbuf == NULL) {
2142 printk(KERN_ERR PFX "Failed to allocate memory in "
2143 "bnx2_alloc_bad_rbuf\n");
2144 return -ENOMEM;
2145 }
2146
2147 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2148 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2149
2150 good_mbuf_cnt = 0;
2151
2152 /* Allocate a bunch of mbufs and save the good ones in an array. */
2153 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2154 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2155 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2156
2157 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2158
2159 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2160
2161 /* The addresses with Bit 9 set are bad memory blocks. */
2162 if (!(val & (1 << 9))) {
2163 good_mbuf[good_mbuf_cnt] = (u16) val;
2164 good_mbuf_cnt++;
2165 }
2166
2167 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2168 }
2169
2170	/* Free the good ones back to the mbuf pool, thus discarding
2171	 * all the bad ones. */
2172 while (good_mbuf_cnt) {
2173 good_mbuf_cnt--;
2174
2175 val = good_mbuf[good_mbuf_cnt];
2176 val = (val << 9) | val | 1;
2177
2178 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2179 }
2180 kfree(good_mbuf);
2181 return 0;
2182}
2183
2184static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002185bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002186{
2187 u32 val;
2188 u8 *mac_addr = bp->dev->dev_addr;
2189
2190 val = (mac_addr[0] << 8) | mac_addr[1];
2191
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2193
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002194 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002195 (mac_addr[4] << 8) | mac_addr[5];
2196
2197 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2198}
2199
2200static inline int
2201bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2202{
2203 struct sk_buff *skb;
2204 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2205 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002206 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002207 unsigned long align;
2208
Michael Chan932f3772006-08-15 01:39:36 -07002209 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002210 if (skb == NULL) {
2211 return -ENOMEM;
2212 }
2213
Michael Chan59b47d82006-11-19 14:10:45 -08002214 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2215 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002216
Michael Chanb6016b72005-05-26 13:03:09 -07002217 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2218 PCI_DMA_FROMDEVICE);
2219
2220 rx_buf->skb = skb;
2221 pci_unmap_addr_set(rx_buf, mapping, mapping);
2222
2223 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2224 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2225
2226 bp->rx_prod_bseq += bp->rx_buf_use_size;
2227
2228 return 0;
2229}
2230
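/* An attention event is pending when its bit differs between the
 * status_attn_bits and status_attn_bits_ack fields of the status
 * block.  Acknowledge the new state through the set/clear command
 * registers and return 1 if the event state changed, 0 otherwise.
 */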
Michael Chanda3e4fb2007-05-03 13:24:23 -07002231static int
2232bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2233{
2234 struct status_block *sblk = bp->status_blk;
2235 u32 new_link_state, old_link_state;
2236 int is_set = 1;
2237
2238 new_link_state = sblk->status_attn_bits & event;
2239 old_link_state = sblk->status_attn_bits_ack & event;
2240 if (new_link_state != old_link_state) {
2241 if (new_link_state)
2242 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2243 else
2244 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2245 } else
2246 is_set = 0;
2247
2248 return is_set;
2249}
2250
Michael Chanb6016b72005-05-26 13:03:09 -07002251static void
2252bnx2_phy_int(struct bnx2 *bp)
2253{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2255 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002256 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002257 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002258 }
Michael Chan0d8a6572007-07-07 22:49:43 -07002259 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2260 bnx2_set_remote_link(bp);
2261
Michael Chanb6016b72005-05-26 13:03:09 -07002262}
2263
2264static void
2265bnx2_tx_int(struct bnx2 *bp)
2266{
Michael Chanf4e418f2005-11-04 08:53:48 -08002267 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002268 u16 hw_cons, sw_cons, sw_ring_cons;
2269 int tx_free_bd = 0;
2270
Michael Chanf4e418f2005-11-04 08:53:48 -08002271 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002272 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2273 hw_cons++;
2274 }
2275 sw_cons = bp->tx_cons;
2276
2277 while (sw_cons != hw_cons) {
2278 struct sw_bd *tx_buf;
2279 struct sk_buff *skb;
2280 int i, last;
2281
2282 sw_ring_cons = TX_RING_IDX(sw_cons);
2283
2284 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2285 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002286
Michael Chanb6016b72005-05-26 13:03:09 -07002287 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002288 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002289 u16 last_idx, last_ring_idx;
2290
2291 last_idx = sw_cons +
2292 skb_shinfo(skb)->nr_frags + 1;
2293 last_ring_idx = sw_ring_cons +
2294 skb_shinfo(skb)->nr_frags + 1;
2295 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2296 last_idx++;
2297 }
2298 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2299 break;
2300 }
2301 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002302
Michael Chanb6016b72005-05-26 13:03:09 -07002303 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2304 skb_headlen(skb), PCI_DMA_TODEVICE);
2305
2306 tx_buf->skb = NULL;
2307 last = skb_shinfo(skb)->nr_frags;
2308
2309 for (i = 0; i < last; i++) {
2310 sw_cons = NEXT_TX_BD(sw_cons);
2311
2312 pci_unmap_page(bp->pdev,
2313 pci_unmap_addr(
2314 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2315 mapping),
2316 skb_shinfo(skb)->frags[i].size,
2317 PCI_DMA_TODEVICE);
2318 }
2319
2320 sw_cons = NEXT_TX_BD(sw_cons);
2321
2322 tx_free_bd += last + 1;
2323
Michael Chan745720e2006-06-29 12:37:41 -07002324 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002325
Michael Chanf4e418f2005-11-04 08:53:48 -08002326 hw_cons = bp->hw_tx_cons =
2327 sblk->status_tx_quick_consumer_index0;
2328
Michael Chanb6016b72005-05-26 13:03:09 -07002329 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2330 hw_cons++;
2331 }
2332 }
2333
Michael Chane89bbf12005-08-25 15:36:58 -07002334 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002335 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2336 * before checking for netif_queue_stopped(). Without the
2337 * memory barrier, there is a small possibility that bnx2_start_xmit()
2338 * will miss it and cause the queue to be stopped forever.
2339 */
2340 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002341
Michael Chan2f8af122006-08-15 01:39:10 -07002342 if (unlikely(netif_queue_stopped(bp->dev)) &&
2343 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2344 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002345 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002346 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002347 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002348 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002349 }
Michael Chanb6016b72005-05-26 13:03:09 -07002350}
2351
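/* Recycle an RX buffer that will not be passed up the stack: move the
 * existing skb and its DMA mapping from the consumer slot to the
 * producer slot so the hardware can reuse it without a new allocation.
 */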
2352static inline void
2353bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2354 u16 cons, u16 prod)
2355{
Michael Chan236b6392006-03-20 17:49:02 -08002356 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2357 struct rx_bd *cons_bd, *prod_bd;
2358
2359 cons_rx_buf = &bp->rx_buf_ring[cons];
2360 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002361
2362 pci_dma_sync_single_for_device(bp->pdev,
2363 pci_unmap_addr(cons_rx_buf, mapping),
2364 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2365
Michael Chan236b6392006-03-20 17:49:02 -08002366 bp->rx_prod_bseq += bp->rx_buf_use_size;
2367
2368 prod_rx_buf->skb = skb;
2369
2370 if (cons == prod)
2371 return;
2372
Michael Chanb6016b72005-05-26 13:03:09 -07002373 pci_unmap_addr_set(prod_rx_buf, mapping,
2374 pci_unmap_addr(cons_rx_buf, mapping));
2375
Michael Chan3fdfcc22006-03-20 17:49:49 -08002376 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2377 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002378 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2379 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002380}
2381
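/* Hand a completed RX buffer up the stack: allocate a fresh replacement
 * buffer for the producer slot first, and only if that succeeds unmap
 * the completed buffer and set its length.  If the allocation fails,
 * the skb is recycled back into the ring instead.
 */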
Michael Chan85833c62007-12-12 11:17:01 -08002382static int
2383bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2384 dma_addr_t dma_addr, u32 ring_idx)
2385{
2386 int err;
2387 u16 prod = ring_idx & 0xffff;
2388
2389 err = bnx2_alloc_rx_skb(bp, prod);
2390 if (unlikely(err)) {
2391 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2392 return err;
2393 }
2394
2395 skb_reserve(skb, bp->rx_offset);
2396 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2397 PCI_DMA_FROMDEVICE);
2398
2399 skb_put(skb, len);
2400 return 0;
2401}
2402
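/* Read the hardware RX consumer index from the status block, skipping
 * over the last entry of a ring page, which is reserved for the
 * next-page link rather than a packet descriptor.
 */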
Michael Chanc09c2622007-12-10 17:18:37 -08002403static inline u16
2404bnx2_get_hw_rx_cons(struct bnx2 *bp)
2405{
2406 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2407
2408 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2409 cons++;
2410 return cons;
2411}
2412
Michael Chanb6016b72005-05-26 13:03:09 -07002413static int
2414bnx2_rx_int(struct bnx2 *bp, int budget)
2415{
2416 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2417 struct l2_fhdr *rx_hdr;
2418 int rx_pkt = 0;
2419
Michael Chanc09c2622007-12-10 17:18:37 -08002420 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002421 sw_cons = bp->rx_cons;
2422 sw_prod = bp->rx_prod;
2423
2424 /* Memory barrier necessary as speculative reads of the rx
2425 * buffer can be ahead of the index in the status block
2426 */
2427 rmb();
2428 while (sw_cons != hw_cons) {
2429 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002430 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002431 struct sw_bd *rx_buf;
2432 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002433 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002434
2435 sw_ring_cons = RX_RING_IDX(sw_cons);
2436 sw_ring_prod = RX_RING_IDX(sw_prod);
2437
2438 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2439 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002440
2441 rx_buf->skb = NULL;
2442
2443 dma_addr = pci_unmap_addr(rx_buf, mapping);
2444
2445 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002446 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2447
2448 rx_hdr = (struct l2_fhdr *) skb->data;
2449 len = rx_hdr->l2_fhdr_pkt_len - 4;
2450
Michael Chanade2bfe2006-01-23 16:09:51 -08002451 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002452 (L2_FHDR_ERRORS_BAD_CRC |
2453 L2_FHDR_ERRORS_PHY_DECODE |
2454 L2_FHDR_ERRORS_ALIGNMENT |
2455 L2_FHDR_ERRORS_TOO_SHORT |
2456 L2_FHDR_ERRORS_GIANT_FRAME)) {
2457
Michael Chan85833c62007-12-12 11:17:01 -08002458 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2459 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002460 }
2461
2462 /* Since we don't have a jumbo ring, copy small packets
2463 * if mtu > 1500
2464 */
2465 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2466 struct sk_buff *new_skb;
2467
Michael Chan932f3772006-08-15 01:39:36 -07002468 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chan85833c62007-12-12 11:17:01 -08002469 if (new_skb == NULL) {
2470 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2471 sw_ring_prod);
2472 goto next_rx;
2473 }
Michael Chanb6016b72005-05-26 13:03:09 -07002474
2475 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002476 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2477 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002478 skb_reserve(new_skb, 2);
2479 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002480
2481 bnx2_reuse_rx_skb(bp, skb,
2482 sw_ring_cons, sw_ring_prod);
2483
2484 skb = new_skb;
Michael Chan85833c62007-12-12 11:17:01 -08002485 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2486 (sw_ring_cons << 16) | sw_ring_prod)))
Michael Chanb6016b72005-05-26 13:03:09 -07002487 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002488
2489 skb->protocol = eth_type_trans(skb, bp->dev);
2490
2491 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002492 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002493
Michael Chan745720e2006-06-29 12:37:41 -07002494 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002495 goto next_rx;
2496
2497 }
2498
Michael Chanb6016b72005-05-26 13:03:09 -07002499 skb->ip_summed = CHECKSUM_NONE;
2500 if (bp->rx_csum &&
2501 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2502 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2503
Michael Chanade2bfe2006-01-23 16:09:51 -08002504 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2505 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002506 skb->ip_summed = CHECKSUM_UNNECESSARY;
2507 }
2508
2509#ifdef BCM_VLAN
2510 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2511 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2512 rx_hdr->l2_fhdr_vlan_tag);
2513 }
2514 else
2515#endif
2516 netif_receive_skb(skb);
2517
2518 bp->dev->last_rx = jiffies;
2519 rx_pkt++;
2520
2521next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002522 sw_cons = NEXT_RX_BD(sw_cons);
2523 sw_prod = NEXT_RX_BD(sw_prod);
2524
2525		if (rx_pkt == budget)
2526 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002527
2528 /* Refresh hw_cons to see if there is new work */
2529 if (sw_cons == hw_cons) {
Michael Chanc09c2622007-12-10 17:18:37 -08002530 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanf4e418f2005-11-04 08:53:48 -08002531 rmb();
2532 }
Michael Chanb6016b72005-05-26 13:03:09 -07002533 }
2534 bp->rx_cons = sw_cons;
2535 bp->rx_prod = sw_prod;
2536
2537 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2538
2539 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2540
2541 mmiowb();
2542
2543 return rx_pkt;
2544
2545}
2546
2547/* MSI ISR - The only difference between this and the INTx ISR
2548 * is that the MSI interrupt is always serviced.
2549 */
2550static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002551bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002552{
2553 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002554 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002555
Michael Chanc921e4c2005-09-08 13:15:32 -07002556 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002557 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2558 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2559 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2560
2561 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002562 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2563 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002564
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002565 netif_rx_schedule(dev, &bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07002566
Michael Chan73eef4c2005-08-25 15:39:15 -07002567 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002568}
2569
2570static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002571bnx2_msi_1shot(int irq, void *dev_instance)
2572{
2573 struct net_device *dev = dev_instance;
2574 struct bnx2 *bp = netdev_priv(dev);
2575
2576 prefetch(bp->status_blk);
2577
2578 /* Return here if interrupt is disabled. */
2579 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2580 return IRQ_HANDLED;
2581
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002582 netif_rx_schedule(dev, &bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07002583
2584 return IRQ_HANDLED;
2585}
2586
2587static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002588bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002589{
2590 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002591 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002592 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002593
2594	/* When using INTx, it is possible for the interrupt to arrive
2595	 * at the CPU before the status block write posted prior to the
2596	 * interrupt has reached host memory.  Reading a register will
2597	 * flush the status block.  When using MSI, the MSI message will
2598	 * always complete after the status block write.
2599 */
Michael Chanb8a7ce72007-07-07 22:51:03 -07002600 if ((sblk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002601 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2602 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002603 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002604
2605 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2606 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2607 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2608
Michael Chanb8a7ce72007-07-07 22:51:03 -07002609 /* Read back to deassert IRQ immediately to avoid too many
2610 * spurious interrupts.
2611 */
2612 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2613
Michael Chanb6016b72005-05-26 13:03:09 -07002614 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002615 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2616 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002617
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002618 if (netif_rx_schedule_prep(dev, &bp->napi)) {
Michael Chanb8a7ce72007-07-07 22:51:03 -07002619 bp->last_status_idx = sblk->status_idx;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002620 __netif_rx_schedule(dev, &bp->napi);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002621 }
Michael Chanb6016b72005-05-26 13:03:09 -07002622
Michael Chan73eef4c2005-08-25 15:39:15 -07002623 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002624}
2625
Michael Chan0d8a6572007-07-07 22:49:43 -07002626#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2627 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002628
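/* There is work to do when the RX or TX consumer index in the status
 * block has moved past what the driver has already processed, or when
 * a link or timer attention event is pending (attention bits differing
 * from their ack bits).
 */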
Michael Chanf4e418f2005-11-04 08:53:48 -08002629static inline int
2630bnx2_has_work(struct bnx2 *bp)
2631{
2632 struct status_block *sblk = bp->status_blk;
2633
Michael Chanc09c2622007-12-10 17:18:37 -08002634 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
Michael Chanf4e418f2005-11-04 08:53:48 -08002635 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2636 return 1;
2637
Michael Chanda3e4fb2007-05-03 13:24:23 -07002638 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2639 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002640 return 1;
2641
2642 return 0;
2643}
2644
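/* NAPI poll helper: handle pending link/timer attention events first
 * (kicking a coalesce-now command so transient status during link
 * changes is picked up), then reap completed TX buffers and receive
 * up to the remaining budget of RX packets.
 */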
David S. Miller6f535762007-10-11 18:08:29 -07002645static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
Michael Chanb6016b72005-05-26 13:03:09 -07002646{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002647 struct status_block *sblk = bp->status_blk;
2648 u32 status_attn_bits = sblk->status_attn_bits;
2649 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002650
Michael Chanda3e4fb2007-05-03 13:24:23 -07002651 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2652 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002653
Michael Chanb6016b72005-05-26 13:03:09 -07002654 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002655
2656 /* This is needed to take care of transient status
2657 * during link changes.
2658 */
2659 REG_WR(bp, BNX2_HC_COMMAND,
2660 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2661 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002662 }
2663
Michael Chan6dee6422007-10-12 01:40:38 -07002664 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002665 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002666
Michael Chanc09c2622007-12-10 17:18:37 -08002667 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
David S. Miller6f535762007-10-11 18:08:29 -07002668 work_done += bnx2_rx_int(bp, budget - work_done);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002669
David S. Miller6f535762007-10-11 18:08:29 -07002670 return work_done;
2671}
Michael Chanf4e418f2005-11-04 08:53:48 -08002672
David S. Miller6f535762007-10-11 18:08:29 -07002673static int bnx2_poll(struct napi_struct *napi, int budget)
2674{
2675 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2676 int work_done = 0;
Michael Chan6dee6422007-10-12 01:40:38 -07002677 struct status_block *sblk = bp->status_blk;
David S. Miller6f535762007-10-11 18:08:29 -07002678
2679 while (1) {
2680 work_done = bnx2_poll_work(bp, work_done, budget);
2681
2682 if (unlikely(work_done >= budget))
2683 break;
2684
Michael Chan6dee6422007-10-12 01:40:38 -07002685 /* bp->last_status_idx is used below to tell the hw how
2686 * much work has been processed, so we must read it before
2687 * checking for more work.
2688 */
2689 bp->last_status_idx = sblk->status_idx;
2690 rmb();
David S. Miller6f535762007-10-11 18:08:29 -07002691 if (likely(!bnx2_has_work(bp))) {
David S. Miller6f535762007-10-11 18:08:29 -07002692 netif_rx_complete(bp->dev, napi);
2693 if (likely(bp->flags & USING_MSI_FLAG)) {
2694 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2695 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2696 bp->last_status_idx);
Michael Chan6dee6422007-10-12 01:40:38 -07002697 break;
David S. Miller6f535762007-10-11 18:08:29 -07002698 }
2699 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2700 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2701 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2702 bp->last_status_idx);
2703
Michael Chan1269a8a2006-01-23 16:11:03 -08002704 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2705 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2706 bp->last_status_idx);
David S. Miller6f535762007-10-11 18:08:29 -07002707 break;
Michael Chan1269a8a2006-01-23 16:11:03 -08002708 }
Michael Chanb6016b72005-05-26 13:03:09 -07002709 }
2710
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002711 return work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002712}
2713
Herbert Xu932ff272006-06-09 12:20:56 -07002714/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002715 * from set_multicast.
2716 */
2717static void
2718bnx2_set_rx_mode(struct net_device *dev)
2719{
Michael Chan972ec0d2006-01-23 16:12:43 -08002720 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002721 u32 rx_mode, sort_mode;
2722 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002723
Michael Chanc770a652005-08-25 15:38:39 -07002724 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002725
2726 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2727 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2728 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2729#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002730 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002731 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002732#else
Michael Chane29054f2006-01-23 16:06:06 -08002733 if (!(bp->flags & ASF_ENABLE_FLAG))
2734 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002735#endif
2736 if (dev->flags & IFF_PROMISC) {
2737 /* Promiscuous mode. */
2738 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002739 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2740 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002741 }
2742 else if (dev->flags & IFF_ALLMULTI) {
2743 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2744 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2745 0xffffffff);
2746 }
2747 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2748 }
2749 else {
2750 /* Accept one or more multicast(s). */
2751 struct dev_mc_list *mclist;
2752 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2753 u32 regidx;
2754 u32 bit;
2755 u32 crc;
2756
2757 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2758
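		/* Hash each address with a little-endian CRC32; the low 8
		 * bits of the CRC pick one bit out of the 256-bit multicast
		 * hash filter spread across the eight hash registers.
		 */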
2759 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2760 i++, mclist = mclist->next) {
2761
2762 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2763 bit = crc & 0xff;
2764 regidx = (bit & 0xe0) >> 5;
2765 bit &= 0x1f;
2766 mc_filter[regidx] |= (1 << bit);
2767 }
2768
2769 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2770 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2771 mc_filter[i]);
2772 }
2773
2774 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2775 }
2776
2777 if (rx_mode != bp->rx_mode) {
2778 bp->rx_mode = rx_mode;
2779 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2780 }
2781
2782 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2783 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2784 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2785
Michael Chanc770a652005-08-25 15:38:39 -07002786 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002787}
2788
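/* Load firmware into one of the two RV2P processors: each 64-bit
 * instruction is written as a high/low register pair and then committed
 * to the processor's instruction memory at word index i/8 through the
 * corresponding ADDR_CMD register.  The processor is left in reset and
 * un-stalled later.
 */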
2789static void
2790load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2791 u32 rv2p_proc)
2792{
2793 int i;
2794 u32 val;
2795
2796
2797 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002798 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002799 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002800 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002801 rv2p_code++;
2802
2803 if (rv2p_proc == RV2P_PROC1) {
2804 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2805 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2806 }
2807 else {
2808 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2809 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2810 }
2811 }
2812
2813	/* Reset the processor; the un-stall is done later. */
2814 if (rv2p_proc == RV2P_PROC1) {
2815 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2816 }
2817 else {
2818 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2819 }
2820}
2821
Michael Chanaf3ee512006-11-19 14:09:25 -08002822static int
Michael Chanb6016b72005-05-26 13:03:09 -07002823load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2824{
2825 u32 offset;
2826 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002827 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002828
2829 /* Halt the CPU. */
2830 val = REG_RD_IND(bp, cpu_reg->mode);
2831 val |= cpu_reg->mode_value_halt;
2832 REG_WR_IND(bp, cpu_reg->mode, val);
2833 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2834
2835 /* Load the Text area. */
2836 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002837 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002838 int j;
2839
Michael Chanea1f8d52007-10-02 16:27:35 -07002840 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2841 fw->gz_text_len);
2842 if (rc < 0)
Denys Vlasenkob3448b02007-09-30 17:55:51 -07002843 return rc;
Michael Chanea1f8d52007-10-02 16:27:35 -07002844
Michael Chanb6016b72005-05-26 13:03:09 -07002845 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07002846 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002847 }
2848 }
2849
2850 /* Load the Data area. */
2851 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2852 if (fw->data) {
2853 int j;
2854
2855 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2856 REG_WR_IND(bp, offset, fw->data[j]);
2857 }
2858 }
2859
2860 /* Load the SBSS area. */
2861 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07002862 if (fw->sbss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07002863 int j;
2864
2865 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07002866 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002867 }
2868 }
2869
2870 /* Load the BSS area. */
2871 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07002872 if (fw->bss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07002873 int j;
2874
2875 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07002876 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002877 }
2878 }
2879
2880 /* Load the Read-Only area. */
2881 offset = cpu_reg->spad_base +
2882 (fw->rodata_addr - cpu_reg->mips_view_base);
2883 if (fw->rodata) {
2884 int j;
2885
2886 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2887 REG_WR_IND(bp, offset, fw->rodata[j]);
2888 }
2889 }
2890
2891 /* Clear the pre-fetch instruction. */
2892 REG_WR_IND(bp, cpu_reg->inst, 0);
2893 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2894
2895 /* Start the CPU. */
2896 val = REG_RD_IND(bp, cpu_reg->mode);
2897 val &= ~cpu_reg->mode_value_halt;
2898 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2899 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002900
2901 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002902}
2903
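/* Decompress and load firmware into the on-chip processors: both RV2P
 * engines first, then the RX, TX, TX patch-up, completion and, on the
 * 5709, command processors.  A single scratch buffer is reused to
 * inflate each compressed image.
 */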
Michael Chanfba9fe92006-06-12 22:21:25 -07002904static int
Michael Chanb6016b72005-05-26 13:03:09 -07002905bnx2_init_cpus(struct bnx2 *bp)
2906{
2907 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002908 struct fw_info *fw;
Denys Vlasenkob3448b02007-09-30 17:55:51 -07002909 int rc;
Michael Chanfba9fe92006-06-12 22:21:25 -07002910 void *text;
Michael Chanb6016b72005-05-26 13:03:09 -07002911
2912 /* Initialize the RV2P processor. */
Denys Vlasenkob3448b02007-09-30 17:55:51 -07002913 text = vmalloc(FW_BUF_SIZE);
2914 if (!text)
2915 return -ENOMEM;
Denys Vlasenko83367932007-09-30 17:56:49 -07002916 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
Michael Chanea1f8d52007-10-02 16:27:35 -07002917 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07002918 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07002919
Denys Vlasenkob3448b02007-09-30 17:55:51 -07002920 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
Michael Chanfba9fe92006-06-12 22:21:25 -07002921
Denys Vlasenko83367932007-09-30 17:56:49 -07002922 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
Michael Chanea1f8d52007-10-02 16:27:35 -07002923 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07002924 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07002925
Denys Vlasenkob3448b02007-09-30 17:55:51 -07002926 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002927
2928 /* Initialize the RX Processor. */
2929 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2930 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2931 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2932 cpu_reg.state = BNX2_RXP_CPU_STATE;
2933 cpu_reg.state_value_clear = 0xffffff;
2934 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2935 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2936 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2937 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2938 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2939 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2940 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002941
Michael Chand43584c2006-11-19 14:14:35 -08002942 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2943 fw = &bnx2_rxp_fw_09;
2944 else
2945 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002946
Michael Chanea1f8d52007-10-02 16:27:35 -07002947 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08002948 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002949 if (rc)
2950 goto init_cpu_err;
2951
Michael Chanb6016b72005-05-26 13:03:09 -07002952 /* Initialize the TX Processor. */
2953 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2954 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2955 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2956 cpu_reg.state = BNX2_TXP_CPU_STATE;
2957 cpu_reg.state_value_clear = 0xffffff;
2958 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2959 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2960 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2961 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2962 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2963 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2964 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002965
Michael Chand43584c2006-11-19 14:14:35 -08002966 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2967 fw = &bnx2_txp_fw_09;
2968 else
2969 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002970
Michael Chanea1f8d52007-10-02 16:27:35 -07002971 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08002972 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002973 if (rc)
2974 goto init_cpu_err;
2975
Michael Chanb6016b72005-05-26 13:03:09 -07002976 /* Initialize the TX Patch-up Processor. */
2977 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2978 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2979 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2980 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2981 cpu_reg.state_value_clear = 0xffffff;
2982 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2983 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2984 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2985 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2986 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2987 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2988 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002989
Michael Chand43584c2006-11-19 14:14:35 -08002990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2991 fw = &bnx2_tpat_fw_09;
2992 else
2993 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002994
Michael Chanea1f8d52007-10-02 16:27:35 -07002995 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08002996 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002997 if (rc)
2998 goto init_cpu_err;
2999
Michael Chanb6016b72005-05-26 13:03:09 -07003000 /* Initialize the Completion Processor. */
3001 cpu_reg.mode = BNX2_COM_CPU_MODE;
3002 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3003 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3004 cpu_reg.state = BNX2_COM_CPU_STATE;
3005 cpu_reg.state_value_clear = 0xffffff;
3006 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3007 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3008 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3009 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3010 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3011 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3012 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003013
Michael Chand43584c2006-11-19 14:14:35 -08003014 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015 fw = &bnx2_com_fw_09;
3016 else
3017 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003018
Michael Chanea1f8d52007-10-02 16:27:35 -07003019 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003020 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003021 if (rc)
3022 goto init_cpu_err;
3023
Michael Chand43584c2006-11-19 14:14:35 -08003024 /* Initialize the Command Processor. */
3025 cpu_reg.mode = BNX2_CP_CPU_MODE;
3026 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3027 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3028 cpu_reg.state = BNX2_CP_CPU_STATE;
3029 cpu_reg.state_value_clear = 0xffffff;
3030 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3031 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3032 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3033 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3034 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3035 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3036 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003037
Michael Chand43584c2006-11-19 14:14:35 -08003038 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3039 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07003040
Michael Chanea1f8d52007-10-02 16:27:35 -07003041 fw->text = text;
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08003042 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08003043 if (rc)
3044 goto init_cpu_err;
3045 }
Michael Chanfba9fe92006-06-12 22:21:25 -07003046init_cpu_err:
Michael Chanea1f8d52007-10-02 16:27:35 -07003047 vfree(text);
Michael Chanfba9fe92006-06-12 22:21:25 -07003048 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003049}
3050
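/* Descriptive summary (added): transition the controller between PCI power
 * states.  Entering D0 writes PMCSR back to D0 (with a delay when leaving
 * D3hot) and turns off magic-packet detection in the EMAC.  Entering D3hot
 * optionally arms Wake-on-LAN: copper ports are forced to 10/100 autoneg,
 * the MAC is set to accept broadcast/multicast wake frames, the EMAC and
 * RPM blocks are kept enabled, and the firmware is told whether to expect
 * WOL before the new power state is written.  No register access is allowed
 * after that point until the device returns to D0.
 */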
3051static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003052bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003053{
3054 u16 pmcsr;
3055
3056 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3057
3058 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003059 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003060 u32 val;
3061
3062 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3063 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3064 PCI_PM_CTRL_PME_STATUS);
3065
3066 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3067 /* delay required during transition out of D3hot */
3068 msleep(20);
3069
3070 val = REG_RD(bp, BNX2_EMAC_MODE);
3071 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3072 val &= ~BNX2_EMAC_MODE_MPKT;
3073 REG_WR(bp, BNX2_EMAC_MODE, val);
3074
3075 val = REG_RD(bp, BNX2_RPM_CONFIG);
3076 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3077 REG_WR(bp, BNX2_RPM_CONFIG, val);
3078 break;
3079 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003080 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003081 int i;
3082 u32 val, wol_msg;
3083
3084 if (bp->wol) {
3085 u32 advertising;
3086 u8 autoneg;
3087
3088 autoneg = bp->autoneg;
3089 advertising = bp->advertising;
3090
Michael Chan239cd342007-10-17 19:26:15 -07003091 if (bp->phy_port == PORT_TP) {
3092 bp->autoneg = AUTONEG_SPEED;
3093 bp->advertising = ADVERTISED_10baseT_Half |
3094 ADVERTISED_10baseT_Full |
3095 ADVERTISED_100baseT_Half |
3096 ADVERTISED_100baseT_Full |
3097 ADVERTISED_Autoneg;
3098 }
Michael Chanb6016b72005-05-26 13:03:09 -07003099
Michael Chan239cd342007-10-17 19:26:15 -07003100 spin_lock_bh(&bp->phy_lock);
3101 bnx2_setup_phy(bp, bp->phy_port);
3102 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003103
3104 bp->autoneg = autoneg;
3105 bp->advertising = advertising;
3106
3107 bnx2_set_mac_addr(bp);
3108
3109 val = REG_RD(bp, BNX2_EMAC_MODE);
3110
3111 /* Enable port mode. */
3112 val &= ~BNX2_EMAC_MODE_PORT;
Michael Chan239cd342007-10-17 19:26:15 -07003113 val |= BNX2_EMAC_MODE_MPKT_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003114 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003115 BNX2_EMAC_MODE_MPKT;
Michael Chan239cd342007-10-17 19:26:15 -07003116 if (bp->phy_port == PORT_TP)
3117 val |= BNX2_EMAC_MODE_PORT_MII;
3118 else {
3119 val |= BNX2_EMAC_MODE_PORT_GMII;
3120 if (bp->line_speed == SPEED_2500)
3121 val |= BNX2_EMAC_MODE_25G_MODE;
3122 }
Michael Chanb6016b72005-05-26 13:03:09 -07003123
3124 REG_WR(bp, BNX2_EMAC_MODE, val);
3125
3126 /* receive all multicast */
3127 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3128 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3129 0xffffffff);
3130 }
3131 REG_WR(bp, BNX2_EMAC_RX_MODE,
3132 BNX2_EMAC_RX_MODE_SORT_MODE);
3133
3134 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3135 BNX2_RPM_SORT_USER0_MC_EN;
3136 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3137 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3138 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3139 BNX2_RPM_SORT_USER0_ENA);
3140
3141 /* Need to enable EMAC and RPM for WOL. */
3142 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3143 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3144 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3145 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3146
3147 val = REG_RD(bp, BNX2_RPM_CONFIG);
3148 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3149 REG_WR(bp, BNX2_RPM_CONFIG, val);
3150
3151 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3152 }
3153 else {
3154 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3155 }
3156
Michael Chandda1e392006-01-23 16:08:14 -08003157 if (!(bp->flags & NO_WOL_FLAG))
3158 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003159
3160 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3161 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3162 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3163
3164 if (bp->wol)
3165 pmcsr |= 3;
3166 }
3167 else {
3168 pmcsr |= 3;
3169 }
3170 if (bp->wol) {
3171 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3172 }
3173 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3174 pmcsr);
3175
3176 /* No more memory access after this point until
3177 * device is brought back to D0.
3178 */
3179 udelay(50);
3180 break;
3181 }
3182 default:
3183 return -EINVAL;
3184 }
3185 return 0;
3186}
3187
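/* NVRAM access is arbitrated between the driver and firmware through the
 * NVM_SW_ARB register: the driver sets its request bit and polls for the
 * ARB2 grant bit, waiting up to NVRAM_TIMEOUT_COUNT * 5 usec.  Release
 * clears the request and waits for the grant to drop.
 */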
3188static int
3189bnx2_acquire_nvram_lock(struct bnx2 *bp)
3190{
3191 u32 val;
3192 int j;
3193
3194 /* Request access to the flash interface. */
3195 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3196 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3197 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3198 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3199 break;
3200
3201 udelay(5);
3202 }
3203
3204 if (j >= NVRAM_TIMEOUT_COUNT)
3205 return -EBUSY;
3206
3207 return 0;
3208}
3209
3210static int
3211bnx2_release_nvram_lock(struct bnx2 *bp)
3212{
3213 int j;
3214 u32 val;
3215
3216 /* Relinquish nvram interface. */
3217 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3218
3219 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3220 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3221 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3222 break;
3223
3224 udelay(5);
3225 }
3226
3227 if (j >= NVRAM_TIMEOUT_COUNT)
3228 return -EBUSY;
3229
3230 return 0;
3231}
3232
3233
3234static int
3235bnx2_enable_nvram_write(struct bnx2 *bp)
3236{
3237 u32 val;
3238
3239 val = REG_RD(bp, BNX2_MISC_CFG);
3240 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3241
Michael Chane30372c2007-07-16 18:26:23 -07003242 if (bp->flash_info->flags & BNX2_NV_WREN) {
Michael Chanb6016b72005-05-26 13:03:09 -07003243 int j;
3244
3245 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3246 REG_WR(bp, BNX2_NVM_COMMAND,
3247 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3248
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 udelay(5);
3251
3252 val = REG_RD(bp, BNX2_NVM_COMMAND);
3253 if (val & BNX2_NVM_COMMAND_DONE)
3254 break;
3255 }
3256
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3258 return -EBUSY;
3259 }
3260 return 0;
3261}
3262
3263static void
3264bnx2_disable_nvram_write(struct bnx2 *bp)
3265{
3266 u32 val;
3267
3268 val = REG_RD(bp, BNX2_MISC_CFG);
3269 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3270}
3271
3272
3273static void
3274bnx2_enable_nvram_access(struct bnx2 *bp)
3275{
3276 u32 val;
3277
3278 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3279 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003280 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003281 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3282}
3283
3284static void
3285bnx2_disable_nvram_access(struct bnx2 *bp)
3286{
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003292 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3293 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3294}
3295
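/* The low-level NVRAM operations below share the same command protocol:
 * clear the DONE bit, program NVM_ADDR (and NVM_WRITE for writes), issue
 * the command through NVM_COMMAND with DOIT set, then poll for DONE.
 * Buffered flash parts skip the explicit page erase.
 */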
3296static int
3297bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3298{
3299 u32 cmd;
3300 int j;
3301
Michael Chane30372c2007-07-16 18:26:23 -07003302 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
Michael Chanb6016b72005-05-26 13:03:09 -07003303 /* Buffered flash, no erase needed */
3304 return 0;
3305
3306 /* Build an erase command */
3307 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3308 BNX2_NVM_COMMAND_DOIT;
3309
3310 /* Need to clear DONE bit separately. */
3311 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3312
3313	/* Address of the NVRAM page to erase. */
3314 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3315
3316 /* Issue an erase command. */
3317 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3318
3319 /* Wait for completion. */
3320 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3321 u32 val;
3322
3323 udelay(5);
3324
3325 val = REG_RD(bp, BNX2_NVM_COMMAND);
3326 if (val & BNX2_NVM_COMMAND_DONE)
3327 break;
3328 }
3329
3330 if (j >= NVRAM_TIMEOUT_COUNT)
3331 return -EBUSY;
3332
3333 return 0;
3334}
3335
3336static int
3337bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3338{
3339 u32 cmd;
3340 int j;
3341
3342 /* Build the command word. */
3343 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3344
Michael Chane30372c2007-07-16 18:26:23 -07003345	/* Calculate the page-translated offset for buffered flash, not needed for 5709. */
3346 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003347 offset = ((offset / bp->flash_info->page_size) <<
3348 bp->flash_info->page_bits) +
3349 (offset % bp->flash_info->page_size);
3350 }
3351
3352 /* Need to clear DONE bit separately. */
3353 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3354
3355 /* Address of the NVRAM to read from. */
3356 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3357
3358 /* Issue a read command. */
3359 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3360
3361 /* Wait for completion. */
3362 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3363 u32 val;
3364
3365 udelay(5);
3366
3367 val = REG_RD(bp, BNX2_NVM_COMMAND);
3368 if (val & BNX2_NVM_COMMAND_DONE) {
3369 val = REG_RD(bp, BNX2_NVM_READ);
3370
3371 val = be32_to_cpu(val);
3372 memcpy(ret_val, &val, 4);
3373 break;
3374 }
3375 }
3376 if (j >= NVRAM_TIMEOUT_COUNT)
3377 return -EBUSY;
3378
3379 return 0;
3380}
3381
3382
3383static int
3384bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3385{
3386 u32 cmd, val32;
3387 int j;
3388
3389 /* Build the command word. */
3390 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3391
Michael Chane30372c2007-07-16 18:26:23 -07003392	/* Calculate the page-translated offset for buffered flash, not needed for 5709. */
3393 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003394 offset = ((offset / bp->flash_info->page_size) <<
3395 bp->flash_info->page_bits) +
3396 (offset % bp->flash_info->page_size);
3397 }
3398
3399 /* Need to clear DONE bit separately. */
3400 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3401
3402 memcpy(&val32, val, 4);
3403 val32 = cpu_to_be32(val32);
3404
3405 /* Write the data. */
3406 REG_WR(bp, BNX2_NVM_WRITE, val32);
3407
3408 /* Address of the NVRAM to write to. */
3409 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3410
3411 /* Issue the write command. */
3412 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3413
3414 /* Wait for completion. */
3415 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3416 udelay(5);
3417
3418 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3419 break;
3420 }
3421 if (j >= NVRAM_TIMEOUT_COUNT)
3422 return -EBUSY;
3423
3424 return 0;
3425}
3426
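/* Identify the attached flash/EEPROM.  The 5709 always uses flash_5709;
 * older chips match the strapping bits in NVM_CFG1 against flash_table
 * and, if the interface has not been reconfigured by the boot code yet,
 * program the CFG/WRITE1 registers for the detected part.  The usable
 * size is taken from shared memory when the firmware provides it.
 */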
3427static int
3428bnx2_init_nvram(struct bnx2 *bp)
3429{
3430 u32 val;
Michael Chane30372c2007-07-16 18:26:23 -07003431 int j, entry_count, rc = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003432 struct flash_spec *flash;
3433
Michael Chane30372c2007-07-16 18:26:23 -07003434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3435 bp->flash_info = &flash_5709;
3436 goto get_flash_size;
3437 }
3438
Michael Chanb6016b72005-05-26 13:03:09 -07003439 /* Determine the selected interface. */
3440 val = REG_RD(bp, BNX2_NVM_CFG1);
3441
Denis Chengff8ac602007-09-02 18:30:18 +08003442 entry_count = ARRAY_SIZE(flash_table);
Michael Chanb6016b72005-05-26 13:03:09 -07003443
Michael Chanb6016b72005-05-26 13:03:09 -07003444 if (val & 0x40000000) {
3445
3446 /* Flash interface has been reconfigured */
3447 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003448 j++, flash++) {
3449 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3450 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003451 bp->flash_info = flash;
3452 break;
3453 }
3454 }
3455 }
3456 else {
Michael Chan37137702005-11-04 08:49:17 -08003457 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003458		/* Flash interface has not been reconfigured yet */
3459
Michael Chan37137702005-11-04 08:49:17 -08003460 if (val & (1 << 23))
3461 mask = FLASH_BACKUP_STRAP_MASK;
3462 else
3463 mask = FLASH_STRAP_MASK;
3464
Michael Chanb6016b72005-05-26 13:03:09 -07003465 for (j = 0, flash = &flash_table[0]; j < entry_count;
3466 j++, flash++) {
3467
Michael Chan37137702005-11-04 08:49:17 -08003468 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003469 bp->flash_info = flash;
3470
3471 /* Request access to the flash interface. */
3472 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3473 return rc;
3474
3475 /* Enable access to flash interface */
3476 bnx2_enable_nvram_access(bp);
3477
3478 /* Reconfigure the flash interface */
3479 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3480 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3481 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3482 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3483
3484 /* Disable access to flash interface */
3485 bnx2_disable_nvram_access(bp);
3486 bnx2_release_nvram_lock(bp);
3487
3488 break;
3489 }
3490 }
3491 } /* if (val & 0x40000000) */
3492
3493 if (j == entry_count) {
3494 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003495 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003496 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003497 }
3498
Michael Chane30372c2007-07-16 18:26:23 -07003499get_flash_size:
Michael Chan1122db72006-01-23 16:11:42 -08003500 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3501 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3502 if (val)
3503 bp->flash_size = val;
3504 else
3505 bp->flash_size = bp->flash_info->total_size;
3506
Michael Chanb6016b72005-05-26 13:03:09 -07003507 return rc;
3508}
3509
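/* Read an arbitrary byte range from NVRAM.  The range is widened to dword
 * boundaries; partial words at the start and end are read into a scratch
 * buffer and only the requested bytes are copied out.  FIRST/LAST command
 * flags bracket the burst.
 */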
3510static int
3511bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3512 int buf_size)
3513{
3514 int rc = 0;
3515 u32 cmd_flags, offset32, len32, extra;
3516
3517 if (buf_size == 0)
3518 return 0;
3519
3520 /* Request access to the flash interface. */
3521 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3522 return rc;
3523
3524 /* Enable access to flash interface */
3525 bnx2_enable_nvram_access(bp);
3526
3527 len32 = buf_size;
3528 offset32 = offset;
3529 extra = 0;
3530
3531 cmd_flags = 0;
3532
3533 if (offset32 & 3) {
3534 u8 buf[4];
3535 u32 pre_len;
3536
3537 offset32 &= ~3;
3538 pre_len = 4 - (offset & 3);
3539
3540 if (pre_len >= len32) {
3541 pre_len = len32;
3542 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3543 BNX2_NVM_COMMAND_LAST;
3544 }
3545 else {
3546 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3547 }
3548
3549 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3550
3551 if (rc)
3552 return rc;
3553
3554 memcpy(ret_buf, buf + (offset & 3), pre_len);
3555
3556 offset32 += 4;
3557 ret_buf += pre_len;
3558 len32 -= pre_len;
3559 }
3560 if (len32 & 3) {
3561 extra = 4 - (len32 & 3);
3562 len32 = (len32 + 4) & ~3;
3563 }
3564
3565 if (len32 == 4) {
3566 u8 buf[4];
3567
3568 if (cmd_flags)
3569 cmd_flags = BNX2_NVM_COMMAND_LAST;
3570 else
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3572 BNX2_NVM_COMMAND_LAST;
3573
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576 memcpy(ret_buf, buf, 4 - extra);
3577 }
3578 else if (len32 > 0) {
3579 u8 buf[4];
3580
3581 /* Read the first word. */
3582 if (cmd_flags)
3583 cmd_flags = 0;
3584 else
3585 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3586
3587 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3588
3589 /* Advance to the next dword. */
3590 offset32 += 4;
3591 ret_buf += 4;
3592 len32 -= 4;
3593
3594 while (len32 > 4 && rc == 0) {
3595 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3596
3597 /* Advance to the next dword. */
3598 offset32 += 4;
3599 ret_buf += 4;
3600 len32 -= 4;
3601 }
3602
3603 if (rc)
3604 return rc;
3605
3606 cmd_flags = BNX2_NVM_COMMAND_LAST;
3607 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3608
3609 memcpy(ret_buf, buf, 4 - extra);
3610 }
3611
3612 /* Disable access to flash interface */
3613 bnx2_disable_nvram_access(bp);
3614
3615 bnx2_release_nvram_lock(bp);
3616
3617 return rc;
3618}
3619
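/* Write an arbitrary byte range to NVRAM using read-modify-write.
 * Unaligned edges are merged with the existing contents, then each
 * affected page is processed in turn: for non-buffered flash the whole
 * page is read into a scratch buffer, erased, and rewritten with the new
 * data spliced in; buffered flash is written directly.
 */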
3620static int
3621bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3622 int buf_size)
3623{
3624 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003625 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003626 int rc = 0;
3627 int align_start, align_end;
3628
3629 buf = data_buf;
3630 offset32 = offset;
3631 len32 = buf_size;
3632 align_start = align_end = 0;
3633
3634 if ((align_start = (offset32 & 3))) {
3635 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003636 len32 += align_start;
3637 if (len32 < 4)
3638 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003639 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3640 return rc;
3641 }
3642
3643 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003644 align_end = 4 - (len32 & 3);
3645 len32 += align_end;
3646 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3647 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003648 }
3649
3650 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003651 align_buf = kmalloc(len32, GFP_KERNEL);
3652 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003653 return -ENOMEM;
3654 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003655 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003656 }
3657 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003658 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003659 }
Michael Chane6be7632007-01-08 19:56:13 -08003660 memcpy(align_buf + align_start, data_buf, buf_size);
3661 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003662 }
3663
Michael Chane30372c2007-07-16 18:26:23 -07003664 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanae181bc2006-05-22 16:39:20 -07003665 flash_buffer = kmalloc(264, GFP_KERNEL);
3666 if (flash_buffer == NULL) {
3667 rc = -ENOMEM;
3668 goto nvram_write_end;
3669 }
3670 }
3671
Michael Chanb6016b72005-05-26 13:03:09 -07003672 written = 0;
3673 while ((written < len32) && (rc == 0)) {
3674 u32 page_start, page_end, data_start, data_end;
3675 u32 addr, cmd_flags;
3676 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003677
3678 /* Find the page_start addr */
3679 page_start = offset32 + written;
3680 page_start -= (page_start % bp->flash_info->page_size);
3681 /* Find the page_end addr */
3682 page_end = page_start + bp->flash_info->page_size;
3683 /* Find the data_start addr */
3684 data_start = (written == 0) ? offset32 : page_start;
3685 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003686 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003687 (offset32 + len32) : page_end;
3688
3689 /* Request access to the flash interface. */
3690 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3691 goto nvram_write_end;
3692
3693 /* Enable access to flash interface */
3694 bnx2_enable_nvram_access(bp);
3695
3696 cmd_flags = BNX2_NVM_COMMAND_FIRST;
Michael Chane30372c2007-07-16 18:26:23 -07003697 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003698 int j;
3699
3700 /* Read the whole page into the buffer
3701			 * (non-buffered flash only) */
3702 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3703 if (j == (bp->flash_info->page_size - 4)) {
3704 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3705 }
3706 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003707 page_start + j,
3708 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003709 cmd_flags);
3710
3711 if (rc)
3712 goto nvram_write_end;
3713
3714 cmd_flags = 0;
3715 }
3716 }
3717
3718 /* Enable writes to flash interface (unlock write-protect) */
3719 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3720 goto nvram_write_end;
3721
Michael Chanb6016b72005-05-26 13:03:09 -07003722 /* Loop to write back the buffer data from page_start to
3723 * data_start */
3724 i = 0;
Michael Chane30372c2007-07-16 18:26:23 -07003725 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanc8738792007-03-30 14:53:06 -07003726 /* Erase the page */
3727 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3728 goto nvram_write_end;
3729
3730 /* Re-enable the write again for the actual write */
3731 bnx2_enable_nvram_write(bp);
3732
Michael Chanb6016b72005-05-26 13:03:09 -07003733 for (addr = page_start; addr < data_start;
3734 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003735
Michael Chanb6016b72005-05-26 13:03:09 -07003736 rc = bnx2_nvram_write_dword(bp, addr,
3737 &flash_buffer[i], cmd_flags);
3738
3739 if (rc != 0)
3740 goto nvram_write_end;
3741
3742 cmd_flags = 0;
3743 }
3744 }
3745
3746 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003747 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003748 if ((addr == page_end - 4) ||
Michael Chane30372c2007-07-16 18:26:23 -07003749 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
Michael Chanb6016b72005-05-26 13:03:09 -07003750 (addr == data_end - 4))) {
3751
3752 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3753 }
3754 rc = bnx2_nvram_write_dword(bp, addr, buf,
3755 cmd_flags);
3756
3757 if (rc != 0)
3758 goto nvram_write_end;
3759
3760 cmd_flags = 0;
3761 buf += 4;
3762 }
3763
3764 /* Loop to write back the buffer data from data_end
3765 * to page_end */
Michael Chane30372c2007-07-16 18:26:23 -07003766 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003767 for (addr = data_end; addr < page_end;
3768 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003769
Michael Chanb6016b72005-05-26 13:03:09 -07003770 if (addr == page_end-4) {
3771 cmd_flags = BNX2_NVM_COMMAND_LAST;
3772 }
3773 rc = bnx2_nvram_write_dword(bp, addr,
3774 &flash_buffer[i], cmd_flags);
3775
3776 if (rc != 0)
3777 goto nvram_write_end;
3778
3779 cmd_flags = 0;
3780 }
3781 }
3782
3783 /* Disable writes to flash interface (lock write-protect) */
3784 bnx2_disable_nvram_write(bp);
3785
3786 /* Disable access to flash interface */
3787 bnx2_disable_nvram_access(bp);
3788 bnx2_release_nvram_lock(bp);
3789
3790 /* Increment written */
3791 written += data_end - data_start;
3792 }
3793
3794nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003795 kfree(flash_buffer);
3796 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003797 return rc;
3798}
3799
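/* Detect firmware-managed (remote) SerDes PHYs.  If the firmware capability
 * mailbox advertises remote PHY support, record the current port type and
 * link state from shared memory and acknowledge the capability so the
 * firmware keeps reporting link changes to the driver.
 */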
Michael Chan0d8a6572007-07-07 22:49:43 -07003800static void
3801bnx2_init_remote_phy(struct bnx2 *bp)
3802{
3803 u32 val;
3804
3805 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3806 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3807 return;
3808
3809 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3810 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3811 return;
3812
3813 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
Michael Chan0d8a6572007-07-07 22:49:43 -07003814 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3815
3816 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3817 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3818 bp->phy_port = PORT_FIBRE;
3819 else
3820 bp->phy_port = PORT_TP;
Michael Chan489310a2007-10-10 16:16:31 -07003821
3822 if (netif_running(bp->dev)) {
3823 u32 sig;
3824
3825 if (val & BNX2_LINK_STATUS_LINK_UP) {
3826 bp->link_up = 1;
3827 netif_carrier_on(bp->dev);
3828 } else {
3829 bp->link_up = 0;
3830 netif_carrier_off(bp->dev);
3831 }
3832 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3833 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3834 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3835 sig);
3836 }
Michael Chan0d8a6572007-07-07 22:49:43 -07003837 }
3838}
3839
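/* Soft-reset the chip.  DMA and host coalescing are quiesced, the firmware
 * is told a reset is coming, and a reset signature is deposited so it treats
 * this as a driver-initiated reset.  The 5709 is reset through MISC_COMMAND;
 * older chips go through PCICFG_MISC_CONFIG and need a delay because 5706
 * A0/A1 cannot tolerate register reads right after the reset.  Byte swapping
 * is then sanity-checked and the remote PHY state re-probed.
 */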
Michael Chanb6016b72005-05-26 13:03:09 -07003840static int
3841bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3842{
3843 u32 val;
3844 int i, rc = 0;
Michael Chan489310a2007-10-10 16:16:31 -07003845 u8 old_port;
Michael Chanb6016b72005-05-26 13:03:09 -07003846
3847 /* Wait for the current PCI transaction to complete before
3848 * issuing a reset. */
3849 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3850 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3851 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3852 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3853 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3854 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3855 udelay(5);
3856
Michael Chanb090ae22006-01-23 16:07:10 -08003857 /* Wait for the firmware to tell us it is ok to issue a reset. */
3858 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3859
Michael Chanb6016b72005-05-26 13:03:09 -07003860 /* Deposit a driver reset signature so the firmware knows that
3861 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003862 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003863 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3864
Michael Chanb6016b72005-05-26 13:03:09 -07003865	/* Do a dummy read to force the chip to complete all current transactions
3866 * before we issue a reset. */
3867 val = REG_RD(bp, BNX2_MISC_ID);
3868
Michael Chan234754d2006-11-19 14:11:41 -08003869 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3870 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3871 REG_RD(bp, BNX2_MISC_COMMAND);
3872 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003873
Michael Chan234754d2006-11-19 14:11:41 -08003874 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3875 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003876
Michael Chan234754d2006-11-19 14:11:41 -08003877 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003878
Michael Chan234754d2006-11-19 14:11:41 -08003879 } else {
3880 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3881 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3882 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3883
3884 /* Chip reset. */
3885 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3886
Michael Chan594a9df2007-08-28 15:39:42 -07003887 /* Reading back any register after chip reset will hang the
3888 * bus on 5706 A0 and A1. The msleep below provides plenty
3889 * of margin for write posting.
3890 */
Michael Chan234754d2006-11-19 14:11:41 -08003891 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
Arjan van de Ven8e545882007-08-28 14:34:43 -07003892 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3893 msleep(20);
Michael Chanb6016b72005-05-26 13:03:09 -07003894
Michael Chan234754d2006-11-19 14:11:41 -08003895		/* Reset takes approximately 30 usec */
3896 for (i = 0; i < 10; i++) {
3897 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3898 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3899 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3900 break;
3901 udelay(10);
3902 }
3903
3904 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3905 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3906 printk(KERN_ERR PFX "Chip reset did not complete\n");
3907 return -EBUSY;
3908 }
Michael Chanb6016b72005-05-26 13:03:09 -07003909 }
3910
3911 /* Make sure byte swapping is properly configured. */
3912 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3913 if (val != 0x01020304) {
3914 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3915 return -ENODEV;
3916 }
3917
Michael Chanb6016b72005-05-26 13:03:09 -07003918 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003919 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3920 if (rc)
3921 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003922
Michael Chan0d8a6572007-07-07 22:49:43 -07003923 spin_lock_bh(&bp->phy_lock);
Michael Chan489310a2007-10-10 16:16:31 -07003924 old_port = bp->phy_port;
Michael Chan0d8a6572007-07-07 22:49:43 -07003925 bnx2_init_remote_phy(bp);
Michael Chan489310a2007-10-10 16:16:31 -07003926 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
Michael Chan0d8a6572007-07-07 22:49:43 -07003927 bnx2_set_default_remote_link(bp);
3928 spin_unlock_bh(&bp->phy_lock);
3929
Michael Chanb6016b72005-05-26 13:03:09 -07003930 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3931		/* Adjust the voltage regulator two steps lower.  The default
3932 * of this register is 0x0000000e. */
3933 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3934
3935 /* Remove bad rbuf memory from the free pool. */
3936 rc = bnx2_alloc_bad_rbuf(bp);
3937 }
3938
3939 return rc;
3940}
3941
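/* Bring the freshly reset chip to an operational state: program DMA
 * byte/word swapping, set up context memory, load the on-chip CPU firmware,
 * restore the MAC address, program the MTU, and configure the host
 * coalescing block (status/statistics block addresses and tick values)
 * before telling the firmware that initialization is complete.
 */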
3942static int
3943bnx2_init_chip(struct bnx2 *bp)
3944{
3945 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003946 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003947
3948 /* Make sure the interrupt is not active. */
3949 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3950
3951 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3952 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3953#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003954 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003955#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003956 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003957 DMA_READ_CHANS << 12 |
3958 DMA_WRITE_CHANS << 16;
3959
3960 val |= (0x2 << 20) | (1 << 11);
3961
Michael Chandda1e392006-01-23 16:08:14 -08003962 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003963 val |= (1 << 23);
3964
3965 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3966 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3967 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3968
3969 REG_WR(bp, BNX2_DMA_CONFIG, val);
3970
3971 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3972 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3973 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3974 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3975 }
3976
3977 if (bp->flags & PCIX_FLAG) {
3978 u16 val16;
3979
3980 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3981 &val16);
3982 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3983 val16 & ~PCI_X_CMD_ERO);
3984 }
3985
3986 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3987 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3988 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3989 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3990
3991 /* Initialize context mapping and zero out the quick contexts. The
3992 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07003993 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3994 rc = bnx2_init_5709_context(bp);
3995 if (rc)
3996 return rc;
3997 } else
Michael Chan59b47d82006-11-19 14:10:45 -08003998 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003999
Michael Chanfba9fe92006-06-12 22:21:25 -07004000 if ((rc = bnx2_init_cpus(bp)) != 0)
4001 return rc;
4002
Michael Chanb6016b72005-05-26 13:03:09 -07004003 bnx2_init_nvram(bp);
4004
4005 bnx2_set_mac_addr(bp);
4006
4007 val = REG_RD(bp, BNX2_MQ_CONFIG);
4008 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4009 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004010 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4011 val |= BNX2_MQ_CONFIG_HALT_DIS;
4012
Michael Chanb6016b72005-05-26 13:03:09 -07004013 REG_WR(bp, BNX2_MQ_CONFIG, val);
4014
4015 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4016 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4017 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4018
4019 val = (BCM_PAGE_BITS - 8) << 24;
4020 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4021
4022 /* Configure page size. */
4023 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4024 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4025 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4026 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4027
4028 val = bp->mac_addr[0] +
4029 (bp->mac_addr[1] << 8) +
4030 (bp->mac_addr[2] << 16) +
4031 bp->mac_addr[3] +
4032 (bp->mac_addr[4] << 8) +
4033 (bp->mac_addr[5] << 16);
4034 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4035
4036 /* Program the MTU. Also include 4 bytes for CRC32. */
4037 val = bp->dev->mtu + ETH_HLEN + 4;
4038 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4039 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4040 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4041
4042 bp->last_status_idx = 0;
4043 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4044
4045 /* Set up how to generate a link change interrupt. */
4046 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4047
4048 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4049 (u64) bp->status_blk_mapping & 0xffffffff);
4050 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4051
4052 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4053 (u64) bp->stats_blk_mapping & 0xffffffff);
4054 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4055 (u64) bp->stats_blk_mapping >> 32);
4056
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004057 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004058 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4059
4060 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4061 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4062
4063 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4064 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4065
4066 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4067
4068 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4069
4070 REG_WR(bp, BNX2_HC_COM_TICKS,
4071 (bp->com_ticks_int << 16) | bp->com_ticks);
4072
4073 REG_WR(bp, BNX2_HC_CMD_TICKS,
4074 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4075
Michael Chan02537b062007-06-04 21:24:07 -07004076 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4077 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4078 else
Michael Chan7ea69202007-07-16 18:27:10 -07004079 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
Michael Chanb6016b72005-05-26 13:03:09 -07004080 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4081
4082 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004083 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004084 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004085 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4086 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004087 }
4088
Michael Chan8e6a72c2007-05-03 13:24:48 -07004089 if (bp->flags & ONE_SHOT_MSI_FLAG)
4090 val |= BNX2_HC_CONFIG_ONE_SHOT;
4091
4092 REG_WR(bp, BNX2_HC_CONFIG, val);
4093
Michael Chanb6016b72005-05-26 13:03:09 -07004094 /* Clear internal stats counters. */
4095 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4096
Michael Chanda3e4fb2007-05-03 13:24:23 -07004097 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004098
4099 /* Initialize the receive filter. */
4100 bnx2_set_rx_mode(bp->dev);
4101
Michael Chan0aa38df2007-06-04 21:23:06 -07004102 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4103 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4104 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4105 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4106 }
Michael Chanb090ae22006-01-23 16:07:10 -08004107 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4108 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004109
Michael Chandf149d72007-07-07 22:51:36 -07004110 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
Michael Chanb6016b72005-05-26 13:03:09 -07004111 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4112
4113 udelay(20);
4114
Michael Chanbf5295b2006-03-23 01:11:56 -08004115 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4116
Michael Chanb090ae22006-01-23 16:07:10 -08004117 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004118}
4119
Michael Chan59b47d82006-11-19 14:10:45 -08004120static void
4121bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4122{
4123 u32 val, offset0, offset1, offset2, offset3;
4124
4125 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4126 offset0 = BNX2_L2CTX_TYPE_XI;
4127 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4128 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4129 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4130 } else {
4131 offset0 = BNX2_L2CTX_TYPE;
4132 offset1 = BNX2_L2CTX_CMD_TYPE;
4133 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4134 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4135 }
4136 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4137 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4138
4139 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4140 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4141
4142 val = (u64) bp->tx_desc_mapping >> 32;
4143 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4144
4145 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4146 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4147}
Michael Chanb6016b72005-05-26 13:03:09 -07004148
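/* Set up the single-page TX ring.  The last BD chains back to the start of
 * the page, the producer index and byte sequence start at zero, and the
 * ring's host address is written into the TX context.
 */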
4149static void
4150bnx2_init_tx_ring(struct bnx2 *bp)
4151{
4152 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004153 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07004154
Michael Chan2f8af122006-08-15 01:39:10 -07004155 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4156
Michael Chanb6016b72005-05-26 13:03:09 -07004157 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004158
Michael Chanb6016b72005-05-26 13:03:09 -07004159 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4160 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4161
4162 bp->tx_prod = 0;
4163 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004164 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004165 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004166
Michael Chan59b47d82006-11-19 14:10:45 -08004167 cid = TX_CID;
4168 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4169 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004170
Michael Chan59b47d82006-11-19 14:10:45 -08004171 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004172}
4173
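/* Set up the RX ring(s).  Each page of BDs ends with a chain BD pointing to
 * the next page (the last page points back to the first), the buffer size
 * accounts for the MTU plus CRC/VLAN and hardware alignment, and
 * rx_ring_size buffers are pre-allocated before the producer index is
 * published to the chip.
 */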
4174static void
4175bnx2_init_rx_ring(struct bnx2 *bp)
4176{
4177 struct rx_bd *rxbd;
4178 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004179 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07004180 u32 val;
4181
4182 /* 8 for CRC and VLAN */
4183 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08004184 /* hw alignment */
4185 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07004186
4187 ring_prod = prod = bp->rx_prod = 0;
4188 bp->rx_cons = 0;
4189 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004190
Michael Chan13daffa2006-03-20 17:49:20 -08004191 for (i = 0; i < bp->rx_max_ring; i++) {
4192 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004193
Michael Chan13daffa2006-03-20 17:49:20 -08004194 rxbd = &bp->rx_desc_ring[i][0];
4195 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4196 rxbd->rx_bd_len = bp->rx_buf_use_size;
4197 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4198 }
4199 if (i == (bp->rx_max_ring - 1))
4200 j = 0;
4201 else
4202 j = i + 1;
4203 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4204 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4205 0xffffffff;
4206 }
Michael Chanb6016b72005-05-26 13:03:09 -07004207
4208 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4209 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4210 val |= 0x02 << 8;
4211 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4212
Michael Chan13daffa2006-03-20 17:49:20 -08004213 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07004214 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4215
Michael Chan13daffa2006-03-20 17:49:20 -08004216 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07004217 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4218
Michael Chan236b6392006-03-20 17:49:02 -08004219 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004220 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4221 break;
4222 }
4223 prod = NEXT_RX_BD(prod);
4224 ring_prod = RX_RING_IDX(prod);
4225 }
4226 bp->rx_prod = prod;
4227
4228 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4229
4230 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4231}
4232
4233static void
Michael Chan13daffa2006-03-20 17:49:20 -08004234bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4235{
4236 u32 num_rings, max;
4237
4238 bp->rx_ring_size = size;
4239 num_rings = 1;
4240 while (size > MAX_RX_DESC_CNT) {
4241 size -= MAX_RX_DESC_CNT;
4242 num_rings++;
4243 }
4244 /* round to next power of 2 */
4245 max = MAX_RX_RINGS;
4246 while ((max & num_rings) == 0)
4247 max >>= 1;
4248
4249 if (num_rings != max)
4250 max <<= 1;
4251
4252 bp->rx_max_ring = max;
4253 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4254}
4255
4256static void
Michael Chanb6016b72005-05-26 13:03:09 -07004257bnx2_free_tx_skbs(struct bnx2 *bp)
4258{
4259 int i;
4260
4261 if (bp->tx_buf_ring == NULL)
4262 return;
4263
4264 for (i = 0; i < TX_DESC_CNT; ) {
4265 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4266 struct sk_buff *skb = tx_buf->skb;
4267 int j, last;
4268
4269 if (skb == NULL) {
4270 i++;
4271 continue;
4272 }
4273
4274 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4275 skb_headlen(skb), PCI_DMA_TODEVICE);
4276
4277 tx_buf->skb = NULL;
4278
4279 last = skb_shinfo(skb)->nr_frags;
4280 for (j = 0; j < last; j++) {
4281 tx_buf = &bp->tx_buf_ring[i + j + 1];
4282 pci_unmap_page(bp->pdev,
4283 pci_unmap_addr(tx_buf, mapping),
4284 skb_shinfo(skb)->frags[j].size,
4285 PCI_DMA_TODEVICE);
4286 }
Michael Chan745720e2006-06-29 12:37:41 -07004287 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004288 i += j + 1;
4289 }
4290
4291}
4292
4293static void
4294bnx2_free_rx_skbs(struct bnx2 *bp)
4295{
4296 int i;
4297
4298 if (bp->rx_buf_ring == NULL)
4299 return;
4300
Michael Chan13daffa2006-03-20 17:49:20 -08004301 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004302 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4303 struct sk_buff *skb = rx_buf->skb;
4304
Michael Chan05d0f1c2005-11-04 08:53:48 -08004305 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004306 continue;
4307
4308 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4309 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4310
4311 rx_buf->skb = NULL;
4312
Michael Chan745720e2006-06-29 12:37:41 -07004313 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004314 }
4315}
4316
4317static void
4318bnx2_free_skbs(struct bnx2 *bp)
4319{
4320 bnx2_free_tx_skbs(bp);
4321 bnx2_free_rx_skbs(bp);
4322}
4323
4324static int
4325bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4326{
4327 int rc;
4328
4329 rc = bnx2_reset_chip(bp, reset_code);
4330 bnx2_free_skbs(bp);
4331 if (rc)
4332 return rc;
4333
Michael Chanfba9fe92006-06-12 22:21:25 -07004334 if ((rc = bnx2_init_chip(bp)) != 0)
4335 return rc;
4336
Michael Chanb6016b72005-05-26 13:03:09 -07004337 bnx2_init_tx_ring(bp);
4338 bnx2_init_rx_ring(bp);
4339 return 0;
4340}
4341
4342static int
4343bnx2_init_nic(struct bnx2 *bp)
4344{
4345 int rc;
4346
4347 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4348 return rc;
4349
Michael Chan80be4432006-11-19 14:07:28 -08004350 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004351 bnx2_init_phy(bp);
4352 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004353 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004354 return 0;
4355}
4356
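/* Register self-test: for each table entry, write 0 and then 0xffffffff and
 * verify that writable bits take the written value, read-only bits keep
 * their original value, and the saved value can be restored.  Entries
 * flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 */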
4357static int
4358bnx2_test_registers(struct bnx2 *bp)
4359{
4360 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004361 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004362 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004363 u16 offset;
4364 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004365#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004366 u32 rw_mask;
4367 u32 ro_mask;
4368 } reg_tbl[] = {
4369 { 0x006c, 0, 0x00000000, 0x0000003f },
4370 { 0x0090, 0, 0xffffffff, 0x00000000 },
4371 { 0x0094, 0, 0x00000000, 0x00000000 },
4372
Michael Chan5bae30c2007-05-03 13:18:46 -07004373 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4374 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4375 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4376 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4377 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4378 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4379 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4380 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004382
Michael Chan5bae30c2007-05-03 13:18:46 -07004383 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4384 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4385 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4386 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4387 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4388 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004389
Michael Chan5bae30c2007-05-03 13:18:46 -07004390 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4391 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4392 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004393
4394 { 0x1000, 0, 0x00000000, 0x00000001 },
4395 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004396
4397 { 0x1408, 0, 0x01c00800, 0x00000000 },
4398 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4399 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004400 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004401 { 0x14b0, 0, 0x00000002, 0x00000001 },
4402 { 0x14b8, 0, 0x00000000, 0x00000000 },
4403 { 0x14c0, 0, 0x00000000, 0x00000009 },
4404 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4405 { 0x14cc, 0, 0x00000000, 0x00000001 },
4406 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004407
4408 { 0x1800, 0, 0x00000000, 0x00000001 },
4409 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004410
4411 { 0x2800, 0, 0x00000000, 0x00000001 },
4412 { 0x2804, 0, 0x00000000, 0x00003f01 },
4413 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4414 { 0x2810, 0, 0xffff0000, 0x00000000 },
4415 { 0x2814, 0, 0xffff0000, 0x00000000 },
4416 { 0x2818, 0, 0xffff0000, 0x00000000 },
4417 { 0x281c, 0, 0xffff0000, 0x00000000 },
4418 { 0x2834, 0, 0xffffffff, 0x00000000 },
4419 { 0x2840, 0, 0x00000000, 0xffffffff },
4420 { 0x2844, 0, 0x00000000, 0xffffffff },
4421 { 0x2848, 0, 0xffffffff, 0x00000000 },
4422 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4423
4424 { 0x2c00, 0, 0x00000000, 0x00000011 },
4425 { 0x2c04, 0, 0x00000000, 0x00030007 },
4426
Michael Chanb6016b72005-05-26 13:03:09 -07004427 { 0x3c00, 0, 0x00000000, 0x00000001 },
4428 { 0x3c04, 0, 0x00000000, 0x00070000 },
4429 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4430 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4431 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4432 { 0x3c14, 0, 0x00000000, 0xffffffff },
4433 { 0x3c18, 0, 0x00000000, 0xffffffff },
4434 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4435 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004436
4437 { 0x5004, 0, 0x00000000, 0x0000007f },
4438 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004439
Michael Chanb6016b72005-05-26 13:03:09 -07004440 { 0x5c00, 0, 0x00000000, 0x00000001 },
4441 { 0x5c04, 0, 0x00000000, 0x0003000f },
4442 { 0x5c08, 0, 0x00000003, 0x00000000 },
4443 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4444 { 0x5c10, 0, 0x00000000, 0xffffffff },
4445 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4446 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4447 { 0x5c88, 0, 0x00000000, 0x00077373 },
4448 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4449
4450 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4451 { 0x680c, 0, 0xffffffff, 0x00000000 },
4452 { 0x6810, 0, 0xffffffff, 0x00000000 },
4453 { 0x6814, 0, 0xffffffff, 0x00000000 },
4454 { 0x6818, 0, 0xffffffff, 0x00000000 },
4455 { 0x681c, 0, 0xffffffff, 0x00000000 },
4456 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4457 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4458 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4459 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4461 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4462 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4463 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4464 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4465 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4466 { 0x684c, 0, 0xffffffff, 0x00000000 },
4467 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4469 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4470 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4471 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4472 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4473
4474 { 0xffff, 0, 0x00000000, 0x00000000 },
4475 };
4476
4477 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004478 is_5709 = 0;
4479 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4480 is_5709 = 1;
4481
Michael Chanb6016b72005-05-26 13:03:09 -07004482 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4483 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004484 u16 flags = reg_tbl[i].flags;
4485
4486 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4487 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004488
4489 offset = (u32) reg_tbl[i].offset;
4490 rw_mask = reg_tbl[i].rw_mask;
4491 ro_mask = reg_tbl[i].ro_mask;
4492
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004493 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004494
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004495 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004496
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004497 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004498 if ((val & rw_mask) != 0) {
4499 goto reg_test_err;
4500 }
4501
4502 if ((val & ro_mask) != (save_val & ro_mask)) {
4503 goto reg_test_err;
4504 }
4505
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004506 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004507
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004508 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004509 if ((val & rw_mask) != rw_mask) {
4510 goto reg_test_err;
4511 }
4512
4513 if ((val & ro_mask) != (save_val & ro_mask)) {
4514 goto reg_test_err;
4515 }
4516
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004517 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004518 continue;
4519
4520reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004521 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004522 ret = -ENODEV;
4523 break;
4524 }
4525 return ret;
4526}
4527
4528static int
4529bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4530{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004531 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004532 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4533 int i;
4534
4535 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4536 u32 offset;
4537
4538 for (offset = 0; offset < size; offset += 4) {
4539
4540 REG_WR_IND(bp, start + offset, test_pattern[i]);
4541
4542 if (REG_RD_IND(bp, start + offset) !=
4543 test_pattern[i]) {
4544 return -ENODEV;
4545 }
4546 }
4547 }
4548 return 0;
4549}
4550
4551static int
4552bnx2_test_memory(struct bnx2 *bp)
4553{
4554 int ret = 0;
4555 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004556 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004557 u32 offset;
4558 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004559 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004560 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004561 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004562 { 0xe0000, 0x4000 },
4563 { 0x120000, 0x4000 },
4564 { 0x1a0000, 0x4000 },
4565 { 0x160000, 0x4000 },
4566 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004567 },
4568 mem_tbl_5709[] = {
4569 { 0x60000, 0x4000 },
4570 { 0xa0000, 0x3000 },
4571 { 0xe0000, 0x4000 },
4572 { 0x120000, 0x4000 },
4573 { 0x1a0000, 0x4000 },
4574 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004575 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004576 struct mem_entry *mem_tbl;
4577
4578 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4579 mem_tbl = mem_tbl_5709;
4580 else
4581 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004582
4583 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4584 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4585 mem_tbl[i].len)) != 0) {
4586 return ret;
4587 }
4588 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004589
Michael Chanb6016b72005-05-26 13:03:09 -07004590 return ret;
4591}
4592
Michael Chanbc5a0692006-01-23 16:13:22 -08004593#define BNX2_MAC_LOOPBACK 0
4594#define BNX2_PHY_LOOPBACK 1
4595
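/* Run one MAC or PHY loopback iteration: build a 1514-byte test frame
 * addressed to ourselves, post it on a single TX BD, force a coalescing
 * pass, then verify that exactly one frame arrived and that its length and
 * payload match what was sent.
 */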
Michael Chanb6016b72005-05-26 13:03:09 -07004596static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004597bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004598{
4599 unsigned int pkt_size, num_pkts, i;
4600 struct sk_buff *skb, *rx_skb;
4601 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004602 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004603 dma_addr_t map;
4604 struct tx_bd *txbd;
4605 struct sw_bd *rx_buf;
4606 struct l2_fhdr *rx_hdr;
4607 int ret = -ENODEV;
4608
Michael Chanbc5a0692006-01-23 16:13:22 -08004609 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4610 bp->loopback = MAC_LOOPBACK;
4611 bnx2_set_mac_loopback(bp);
4612 }
4613 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan489310a2007-10-10 16:16:31 -07004614 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4615 return 0;
4616
Michael Chan80be4432006-11-19 14:07:28 -08004617 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004618 bnx2_set_phy_loopback(bp);
4619 }
4620 else
4621 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004622
4623 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004624 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004625 if (!skb)
4626 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004627 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004628 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004629 memset(packet + 6, 0x0, 8);
4630 for (i = 14; i < pkt_size; i++)
4631 packet[i] = (unsigned char) (i & 0xff);
4632
4633 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4634 PCI_DMA_TODEVICE);
4635
Michael Chanbf5295b2006-03-23 01:11:56 -08004636 REG_WR(bp, BNX2_HC_COMMAND,
4637 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4638
Michael Chanb6016b72005-05-26 13:03:09 -07004639 REG_RD(bp, BNX2_HC_COMMAND);
4640
4641 udelay(5);
4642 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4643
Michael Chanb6016b72005-05-26 13:03:09 -07004644 num_pkts = 0;
4645
Michael Chanbc5a0692006-01-23 16:13:22 -08004646 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004647
4648 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4649 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4650 txbd->tx_bd_mss_nbytes = pkt_size;
4651 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4652
4653 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004654 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4655 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004656
Michael Chan234754d2006-11-19 14:11:41 -08004657 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4658 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004659
4660 udelay(100);
4661
Michael Chanbf5295b2006-03-23 01:11:56 -08004662 REG_WR(bp, BNX2_HC_COMMAND,
4663 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4664
Michael Chanb6016b72005-05-26 13:03:09 -07004665 REG_RD(bp, BNX2_HC_COMMAND);
4666
4667 udelay(5);
4668
4669 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004670 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004671
Michael Chanbc5a0692006-01-23 16:13:22 -08004672 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004673 goto loopback_test_done;
4674 }
4675
4676 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4677 if (rx_idx != rx_start_idx + num_pkts) {
4678 goto loopback_test_done;
4679 }
4680
4681 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4682 rx_skb = rx_buf->skb;
4683
4684 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4685 skb_reserve(rx_skb, bp->rx_offset);
4686
4687 pci_dma_sync_single_for_cpu(bp->pdev,
4688 pci_unmap_addr(rx_buf, mapping),
4689 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4690
Michael Chanade2bfe2006-01-23 16:09:51 -08004691 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004692 (L2_FHDR_ERRORS_BAD_CRC |
4693 L2_FHDR_ERRORS_PHY_DECODE |
4694 L2_FHDR_ERRORS_ALIGNMENT |
4695 L2_FHDR_ERRORS_TOO_SHORT |
4696 L2_FHDR_ERRORS_GIANT_FRAME)) {
4697
4698 goto loopback_test_done;
4699 }
4700
4701 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4702 goto loopback_test_done;
4703 }
4704
4705 for (i = 14; i < pkt_size; i++) {
4706 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4707 goto loopback_test_done;
4708 }
4709 }
4710
4711 ret = 0;
4712
4713loopback_test_done:
4714 bp->loopback = 0;
4715 return ret;
4716}
4717
Michael Chanbc5a0692006-01-23 16:13:22 -08004718#define BNX2_MAC_LOOPBACK_FAILED 1
4719#define BNX2_PHY_LOOPBACK_FAILED 2
4720#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4721 BNX2_PHY_LOOPBACK_FAILED)
4722
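/* Run both loopback modes and report failures as a bitmask of
 * BNX2_MAC_LOOPBACK_FAILED / BNX2_PHY_LOOPBACK_FAILED.  The NIC is
 * reset and the PHY re-initialized first so the test starts from a
 * known state; if the device is not running, both modes are reported
 * as failed.
 */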
4723static int
4724bnx2_test_loopback(struct bnx2 *bp)
4725{
4726 int rc = 0;
4727
4728 if (!netif_running(bp->dev))
4729 return BNX2_LOOPBACK_FAILED;
4730
4731 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4732 spin_lock_bh(&bp->phy_lock);
4733 bnx2_init_phy(bp);
4734 spin_unlock_bh(&bp->phy_lock);
4735 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4736 rc |= BNX2_MAC_LOOPBACK_FAILED;
4737 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4738 rc |= BNX2_PHY_LOOPBACK_FAILED;
4739 return rc;
4740}
4741
Michael Chanb6016b72005-05-26 13:03:09 -07004742#define NVRAM_SIZE 0x200
4743#define CRC32_RESIDUAL 0xdebb20e3
4744
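/* Sanity-check the NVRAM contents: the first word must hold the
 * 0x669955aa signature, and each of the two 256-byte blocks read from
 * offset 0x100 must checksum to the standard CRC32 residual
 * (0xdebb20e3), i.e. the CRCs stored in NVRAM are valid.
 */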
4745static int
4746bnx2_test_nvram(struct bnx2 *bp)
4747{
4748 u32 buf[NVRAM_SIZE / 4];
4749 u8 *data = (u8 *) buf;
4750 int rc = 0;
4751 u32 magic, csum;
4752
4753 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4754 goto test_nvram_done;
4755
4756 magic = be32_to_cpu(buf[0]);
4757 if (magic != 0x669955aa) {
4758 rc = -ENODEV;
4759 goto test_nvram_done;
4760 }
4761
4762 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4763 goto test_nvram_done;
4764
4765 csum = ether_crc_le(0x100, data);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 goto test_nvram_done;
4769 }
4770
4771 csum = ether_crc_le(0x100, data + 0x100);
4772 if (csum != CRC32_RESIDUAL) {
4773 rc = -ENODEV;
4774 }
4775
4776test_nvram_done:
4777 return rc;
4778}
4779
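/* Report link state for the self-test.  With a remote PHY the cached
 * bp->link_up flag is used; otherwise BMSR is read twice, the usual
 * idiom for clearing the latched link-down bit so the second read
 * reflects the current link state.
 */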
4780static int
4781bnx2_test_link(struct bnx2 *bp)
4782{
4783 u32 bmsr;
4784
Michael Chan489310a2007-10-10 16:16:31 -07004785 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4786 if (bp->link_up)
4787 return 0;
4788 return -ENODEV;
4789 }
Michael Chanc770a652005-08-25 15:38:39 -07004790 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004791 bnx2_enable_bmsr1(bp);
4792 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4794 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004795 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004796
Michael Chanb6016b72005-05-26 13:03:09 -07004797 if (bmsr & BMSR_LSTATUS) {
4798 return 0;
4799 }
4800 return -ENODEV;
4801}
4802
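/* Verify that the device can generate an interrupt: record the current
 * status-block index, force an immediate coalescing event with
 * BNX2_HC_COMMAND_COAL_NOW, then poll BNX2_PCICFG_INT_ACK_CMD for up
 * to roughly 100 ms waiting for the index to change.
 */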
4803static int
4804bnx2_test_intr(struct bnx2 *bp)
4805{
4806 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004807 u16 status_idx;
4808
4809 if (!netif_running(bp->dev))
4810 return -ENODEV;
4811
4812 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4813
4814 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004815 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004816 REG_RD(bp, BNX2_HC_COMMAND);
4817
4818 for (i = 0; i < 10; i++) {
4819 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4820 status_idx) {
4821
4822 break;
4823 }
4824
4825 msleep_interruptible(10);
4826 }
4827 if (i < 10)
4828 return 0;
4829
4830 return -ENODEV;
4831}
4832
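/* Periodic SerDes handling for the 5706.  While autoneg has not
 * brought the link up, look for a partner that asserts signal detect
 * but sends no config words and, if found, force 1 Gb/s full duplex
 * (parallel detect).  Once such a forced link is up, re-enable autoneg
 * as soon as config words are seen from the partner again.
 */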
4833static void
Michael Chan48b01e22006-11-19 14:08:00 -08004834bnx2_5706_serdes_timer(struct bnx2 *bp)
4835{
4836 spin_lock(&bp->phy_lock);
4837 if (bp->serdes_an_pending)
4838 bp->serdes_an_pending--;
4839 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4840 u32 bmcr;
4841
4842 bp->current_interval = bp->timer_interval;
4843
Michael Chanca58c3a2007-05-03 13:22:52 -07004844 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004845
4846 if (bmcr & BMCR_ANENABLE) {
4847 u32 phy1, phy2;
4848
4849 bnx2_write_phy(bp, 0x1c, 0x7c00);
4850 bnx2_read_phy(bp, 0x1c, &phy1);
4851
4852 bnx2_write_phy(bp, 0x17, 0x0f01);
4853 bnx2_read_phy(bp, 0x15, &phy2);
4854 bnx2_write_phy(bp, 0x17, 0x0f01);
4855 bnx2_read_phy(bp, 0x15, &phy2);
4856
4857 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4858 !(phy2 & 0x20)) { /* no CONFIG */
4859
4860 bmcr &= ~BMCR_ANENABLE;
4861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004862 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004863 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4864 }
4865 }
4866 }
4867 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4868 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4869 u32 phy2;
4870
4871 bnx2_write_phy(bp, 0x17, 0x0f01);
4872 bnx2_read_phy(bp, 0x15, &phy2);
4873 if (phy2 & 0x20) {
4874 u32 bmcr;
4875
Michael Chanca58c3a2007-05-03 13:22:52 -07004876 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004877 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004878 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004879
4880 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4881 }
4882 } else
4883 bp->current_interval = bp->timer_interval;
4884
4885 spin_unlock(&bp->phy_lock);
4886}
4887
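/* Periodic SerDes handling for the 5708, skipped when a remote PHY is
 * in control or the part is not 2.5G capable.  While autoneg fails to
 * bring the link up, alternate between forcing 2.5G and restoring
 * autoneg so both forced and autonegotiating partners can be detected.
 */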
4888static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004889bnx2_5708_serdes_timer(struct bnx2 *bp)
4890{
Michael Chan0d8a6572007-07-07 22:49:43 -07004891 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4892 return;
4893
Michael Chanf8dd0642006-11-19 14:08:29 -08004894 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4895 bp->serdes_an_pending = 0;
4896 return;
4897 }
4898
4899 spin_lock(&bp->phy_lock);
4900 if (bp->serdes_an_pending)
4901 bp->serdes_an_pending--;
4902 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4903 u32 bmcr;
4904
Michael Chanca58c3a2007-05-03 13:22:52 -07004905 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004906 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004907 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004908 bp->current_interval = SERDES_FORCED_TIMEOUT;
4909 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004910 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004911 bp->serdes_an_pending = 2;
4912 bp->current_interval = bp->timer_interval;
4913 }
4914
4915 } else
4916 bp->current_interval = bp->timer_interval;
4917
4918 spin_unlock(&bp->phy_lock);
4919}
4920
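/* Per-device timer, rearmed every bp->current_interval jiffies while
 * the interface is running.  It sends the driver heartbeat to the
 * firmware, refreshes the firmware RX drop counter, kicks a statistics
 * update on the 5708 (see the corrupted-counter workaround below), and
 * runs the per-chip SerDes state machines.
 */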
4921static void
Michael Chanb6016b72005-05-26 13:03:09 -07004922bnx2_timer(unsigned long data)
4923{
4924 struct bnx2 *bp = (struct bnx2 *) data;
Michael Chanb6016b72005-05-26 13:03:09 -07004925
Michael Chancd339a02005-08-25 15:35:24 -07004926 if (!netif_running(bp->dev))
4927 return;
4928
Michael Chanb6016b72005-05-26 13:03:09 -07004929 if (atomic_read(&bp->intr_sem) != 0)
4930 goto bnx2_restart_timer;
4931
Michael Chandf149d72007-07-07 22:51:36 -07004932 bnx2_send_heart_beat(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004933
Michael Chancea94db2006-06-12 22:16:13 -07004934 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4935
Michael Chan02537b062007-06-04 21:24:07 -07004936	/* Work around occasionally corrupted counters on the 5708. */
4937 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4938 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4939 BNX2_HC_COMMAND_STATS_NOW);
4940
Michael Chanf8dd0642006-11-19 14:08:29 -08004941 if (bp->phy_flags & PHY_SERDES_FLAG) {
4942 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4943 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004944 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004945 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004946 }
4947
4948bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004949 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004950}
4951
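/* Request the device interrupt.  With MSI the vector is exclusive, so
 * no IRQF_SHARED is needed, and the one-shot MSI handler is used when
 * the ONE_SHOT_MSI_FLAG is set; otherwise fall back to a shared INTx
 * line.
 */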
Michael Chan8e6a72c2007-05-03 13:24:48 -07004952static int
4953bnx2_request_irq(struct bnx2 *bp)
4954{
4955 struct net_device *dev = bp->dev;
4956 int rc = 0;
4957
4958 if (bp->flags & USING_MSI_FLAG) {
4959 irq_handler_t fn = bnx2_msi;
4960
4961 if (bp->flags & ONE_SHOT_MSI_FLAG)
4962 fn = bnx2_msi_1shot;
4963
4964 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4965 } else
4966 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4967 IRQF_SHARED, dev->name, dev);
4968 return rc;
4969}
4970
4971static void
4972bnx2_free_irq(struct bnx2 *bp)
4973{
4974 struct net_device *dev = bp->dev;
4975
4976 if (bp->flags & USING_MSI_FLAG) {
4977 free_irq(bp->pdev->irq, dev);
4978 pci_disable_msi(bp->pdev);
4979 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4980 } else
4981 free_irq(bp->pdev->irq, dev);
4982}
4983
Michael Chanb6016b72005-05-26 13:03:09 -07004984/* Called with rtnl_lock */
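/* Bring the interface up: allocate the rings, enable NAPI, prefer MSI
 * (one-shot MSI on the 5709) with a fallback to INTx, initialize the
 * NIC and start the timer.  Because MSI delivery is broken on some
 * chipsets (see the warning below), a test interrupt is generated
 * after enabling interrupts; if it never arrives, the driver reverts
 * to INTx and reinitializes the device.
 */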
4985static int
4986bnx2_open(struct net_device *dev)
4987{
Michael Chan972ec0d2006-01-23 16:12:43 -08004988 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004989 int rc;
4990
Michael Chan1b2f9222007-05-03 13:20:19 -07004991 netif_carrier_off(dev);
4992
Pavel Machek829ca9a2005-09-03 15:56:56 -07004993 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004994 bnx2_disable_int(bp);
4995
4996 rc = bnx2_alloc_mem(bp);
4997 if (rc)
4998 return rc;
4999
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005000 napi_enable(&bp->napi);
5001
Michael Chan8e6a72c2007-05-03 13:24:48 -07005002 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07005003 if (pci_enable_msi(bp->pdev) == 0) {
5004 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005005 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5006 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005007 }
5008 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07005009 rc = bnx2_request_irq(bp);
5010
Michael Chanb6016b72005-05-26 13:03:09 -07005011 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005012 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005013 bnx2_free_mem(bp);
5014 return rc;
5015 }
5016
5017 rc = bnx2_init_nic(bp);
5018
5019 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005020 napi_disable(&bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005021 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005022 bnx2_free_skbs(bp);
5023 bnx2_free_mem(bp);
5024 return rc;
5025 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005026
Michael Chancd339a02005-08-25 15:35:24 -07005027 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005028
5029 atomic_set(&bp->intr_sem, 0);
5030
5031 bnx2_enable_int(bp);
5032
5033 if (bp->flags & USING_MSI_FLAG) {
5034		/* Test MSI to make sure it is working.
5035		 * If the MSI test fails, go back to INTx mode.
5036 */
5037 if (bnx2_test_intr(bp) != 0) {
5038 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5039 " using MSI, switching to INTx mode. Please"
5040 " report this failure to the PCI maintainer"
5041 " and include system chipset information.\n",
5042 bp->dev->name);
5043
5044 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005045 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005046
5047 rc = bnx2_init_nic(bp);
5048
Michael Chan8e6a72c2007-05-03 13:24:48 -07005049 if (!rc)
5050 rc = bnx2_request_irq(bp);
5051
Michael Chanb6016b72005-05-26 13:03:09 -07005052 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005053 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005054 bnx2_free_skbs(bp);
5055 bnx2_free_mem(bp);
5056 del_timer_sync(&bp->timer);
5057 return rc;
5058 }
5059 bnx2_enable_int(bp);
5060 }
5061 }
5062 if (bp->flags & USING_MSI_FLAG) {
5063 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5064 }
5065
5066 netif_start_queue(dev);
5067
5068 return 0;
5069}
5070
5071static void
David Howellsc4028952006-11-22 14:57:56 +00005072bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005073{
David Howellsc4028952006-11-22 14:57:56 +00005074 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005075
Michael Chanafdc08b2005-08-25 15:34:29 -07005076 if (!netif_running(bp->dev))
5077 return;
5078
5079 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005080 bnx2_netif_stop(bp);
5081
5082 bnx2_init_nic(bp);
5083
5084 atomic_set(&bp->intr_sem, 1);
5085 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005086 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005087}
5088
5089static void
5090bnx2_tx_timeout(struct net_device *dev)
5091{
Michael Chan972ec0d2006-01-23 16:12:43 -08005092 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005093
5094	/* This allows the netif to be shut down gracefully before resetting */
5095 schedule_work(&bp->reset_task);
5096}
5097
5098#ifdef BCM_VLAN
5099/* Called with rtnl_lock */
5100static void
5101bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5102{
Michael Chan972ec0d2006-01-23 16:12:43 -08005103 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005104
5105 bnx2_netif_stop(bp);
5106
5107 bp->vlgrp = vlgrp;
5108 bnx2_set_rx_mode(dev);
5109
5110 bnx2_netif_start(bp);
5111}
Michael Chanb6016b72005-05-26 13:03:09 -07005112#endif
5113
Herbert Xu932ff272006-06-09 12:20:56 -07005114/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005115 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5116 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005117 */
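/* bnx2_start_xmit() maps the linear part and each page fragment of the
 * skb to its own TX buffer descriptor, encoding checksum-offload, VLAN
 * tag and TSO information in the vlan_tag_flags and mss fields, then
 * writes the producer index and byte-sequence doorbell registers.  The
 * queue is stopped when bnx2_tx_avail() drops to MAX_SKB_FRAGS or
 * fewer descriptors.
 */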
5118static int
5119bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5120{
Michael Chan972ec0d2006-01-23 16:12:43 -08005121 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005122 dma_addr_t mapping;
5123 struct tx_bd *txbd;
5124 struct sw_bd *tx_buf;
5125 u32 len, vlan_tag_flags, last_frag, mss;
5126 u16 prod, ring_prod;
5127 int i;
5128
Michael Chane89bbf12005-08-25 15:36:58 -07005129 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005130 netif_stop_queue(dev);
5131 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5132 dev->name);
5133
5134 return NETDEV_TX_BUSY;
5135 }
5136 len = skb_headlen(skb);
5137 prod = bp->tx_prod;
5138 ring_prod = TX_RING_IDX(prod);
5139
5140 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005141 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005142 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5143 }
5144
5145 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5146 vlan_tag_flags |=
5147 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5148 }
Michael Chanfde82052007-05-03 17:23:35 -07005149 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005150 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005151 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005152
Michael Chanb6016b72005-05-26 13:03:09 -07005153 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5154
Michael Chan4666f872007-05-03 13:22:28 -07005155 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005156
Michael Chan4666f872007-05-03 13:22:28 -07005157 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5158 u32 tcp_off = skb_transport_offset(skb) -
5159 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005160
Michael Chan4666f872007-05-03 13:22:28 -07005161 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5162 TX_BD_FLAGS_SW_FLAGS;
5163 if (likely(tcp_off == 0))
5164 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5165 else {
5166 tcp_off >>= 3;
5167 vlan_tag_flags |= ((tcp_off & 0x3) <<
5168 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5169 ((tcp_off & 0x10) <<
5170 TX_BD_FLAGS_TCP6_OFF4_SHL);
5171 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5172 }
5173 } else {
5174 if (skb_header_cloned(skb) &&
5175 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5176 dev_kfree_skb(skb);
5177 return NETDEV_TX_OK;
5178 }
5179
5180 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5181
5182 iph = ip_hdr(skb);
5183 iph->check = 0;
5184 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5185 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5186 iph->daddr, 0,
5187 IPPROTO_TCP,
5188 0);
5189 if (tcp_opt_len || (iph->ihl > 5)) {
5190 vlan_tag_flags |= ((iph->ihl - 5) +
5191 (tcp_opt_len >> 2)) << 8;
5192 }
Michael Chanb6016b72005-05-26 13:03:09 -07005193 }
Michael Chan4666f872007-05-03 13:22:28 -07005194 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005195 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005196
5197 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005198
Michael Chanb6016b72005-05-26 13:03:09 -07005199 tx_buf = &bp->tx_buf_ring[ring_prod];
5200 tx_buf->skb = skb;
5201 pci_unmap_addr_set(tx_buf, mapping, mapping);
5202
5203 txbd = &bp->tx_desc_ring[ring_prod];
5204
5205 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5206 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5207 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5208 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5209
5210 last_frag = skb_shinfo(skb)->nr_frags;
5211
5212 for (i = 0; i < last_frag; i++) {
5213 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5214
5215 prod = NEXT_TX_BD(prod);
5216 ring_prod = TX_RING_IDX(prod);
5217 txbd = &bp->tx_desc_ring[ring_prod];
5218
5219 len = frag->size;
5220 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5221 len, PCI_DMA_TODEVICE);
5222 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5223 mapping, mapping);
5224
5225 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5226 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5227 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5228 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5229
5230 }
5231 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5232
5233 prod = NEXT_TX_BD(prod);
5234 bp->tx_prod_bseq += skb->len;
5235
Michael Chan234754d2006-11-19 14:11:41 -08005236 REG_WR16(bp, bp->tx_bidx_addr, prod);
5237 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005238
5239 mmiowb();
5240
5241 bp->tx_prod = prod;
5242 dev->trans_start = jiffies;
5243
Michael Chane89bbf12005-08-25 15:36:58 -07005244 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005245 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07005246 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005247 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005248 }
5249
5250 return NETDEV_TX_OK;
5251}
5252
5253/* Called with rtnl_lock */
5254static int
5255bnx2_close(struct net_device *dev)
5256{
Michael Chan972ec0d2006-01-23 16:12:43 -08005257 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005258 u32 reset_code;
5259
Michael Chanafdc08b2005-08-25 15:34:29 -07005260 /* Calling flush_scheduled_work() may deadlock because
5261 * linkwatch_event() may be on the workqueue and it will try to get
5262	 * the rtnl_lock, which we are holding.
5263 */
5264 while (bp->in_reset_task)
5265 msleep(1);
5266
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005267 bnx2_disable_int_sync(bp);
5268 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005269 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005270 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005271 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005272 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005273 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5274 else
5275 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5276 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005277 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005278 bnx2_free_skbs(bp);
5279 bnx2_free_mem(bp);
5280 bp->link_up = 0;
5281 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005282 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005283 return 0;
5284}
5285
5286#define GET_NET_STATS64(ctr) \
5287 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5288 (unsigned long) (ctr##_lo)
5289
5290#define GET_NET_STATS32(ctr) \
5291 (ctr##_lo)
5292
5293#if (BITS_PER_LONG == 64)
5294#define GET_NET_STATS GET_NET_STATS64
5295#else
5296#define GET_NET_STATS GET_NET_STATS32
5297#endif
5298
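/* Example: with GET_NET_STATS(stats_blk->stat_IfHCInOctets), the token
 * pasting produces stat_IfHCInOctets_hi and stat_IfHCInOctets_lo; on a
 * 64-bit host these are combined as (hi << 32) + lo, while a 32-bit
 * host reports only the low 32 bits of each counter.
 */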
5299static struct net_device_stats *
5300bnx2_get_stats(struct net_device *dev)
5301{
Michael Chan972ec0d2006-01-23 16:12:43 -08005302 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005303 struct statistics_block *stats_blk = bp->stats_blk;
5304 struct net_device_stats *net_stats = &bp->net_stats;
5305
5306 if (bp->stats_blk == NULL) {
5307 return net_stats;
5308 }
5309 net_stats->rx_packets =
5310 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5311 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5312 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5313
5314 net_stats->tx_packets =
5315 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5316 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5317 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5318
5319 net_stats->rx_bytes =
5320 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5321
5322 net_stats->tx_bytes =
5323 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5324
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005325 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005326 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5327
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005328 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005329 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5330
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005331 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005332 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5333 stats_blk->stat_EtherStatsOverrsizePkts);
5334
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005335 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005336 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5337
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005338 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005339 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5340
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005341 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005342 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5343
5344 net_stats->rx_errors = net_stats->rx_length_errors +
5345 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5346 net_stats->rx_crc_errors;
5347
5348 net_stats->tx_aborted_errors =
5349 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5350 stats_blk->stat_Dot3StatsLateCollisions);
5351
Michael Chan5b0c76a2005-11-04 08:45:49 -08005352 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5353 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005354 net_stats->tx_carrier_errors = 0;
5355 else {
5356 net_stats->tx_carrier_errors =
5357 (unsigned long)
5358 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5359 }
5360
5361 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005362 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005363 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5364 +
5365 net_stats->tx_aborted_errors +
5366 net_stats->tx_carrier_errors;
5367
Michael Chancea94db2006-06-12 22:16:13 -07005368 net_stats->rx_missed_errors =
5369 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5370 stats_blk->stat_FwRxDrop);
5371
Michael Chanb6016b72005-05-26 13:03:09 -07005372 return net_stats;
5373}
5374
5375/* All ethtool functions called with rtnl_lock */
5376
5377static int
5378bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5379{
Michael Chan972ec0d2006-01-23 16:12:43 -08005380 struct bnx2 *bp = netdev_priv(dev);
Michael Chan7b6b8342007-07-07 22:50:15 -07005381 int support_serdes = 0, support_copper = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005382
5383 cmd->supported = SUPPORTED_Autoneg;
Michael Chan7b6b8342007-07-07 22:50:15 -07005384 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5385 support_serdes = 1;
5386 support_copper = 1;
5387 } else if (bp->phy_port == PORT_FIBRE)
5388 support_serdes = 1;
5389 else
5390 support_copper = 1;
5391
5392 if (support_serdes) {
Michael Chanb6016b72005-05-26 13:03:09 -07005393 cmd->supported |= SUPPORTED_1000baseT_Full |
5394 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005395 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5396 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005397
Michael Chanb6016b72005-05-26 13:03:09 -07005398 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005399 if (support_copper) {
Michael Chanb6016b72005-05-26 13:03:09 -07005400 cmd->supported |= SUPPORTED_10baseT_Half |
5401 SUPPORTED_10baseT_Full |
5402 SUPPORTED_100baseT_Half |
5403 SUPPORTED_100baseT_Full |
5404 SUPPORTED_1000baseT_Full |
5405 SUPPORTED_TP;
5406
Michael Chanb6016b72005-05-26 13:03:09 -07005407 }
5408
Michael Chan7b6b8342007-07-07 22:50:15 -07005409 spin_lock_bh(&bp->phy_lock);
5410 cmd->port = bp->phy_port;
Michael Chanb6016b72005-05-26 13:03:09 -07005411 cmd->advertising = bp->advertising;
5412
5413 if (bp->autoneg & AUTONEG_SPEED) {
5414 cmd->autoneg = AUTONEG_ENABLE;
5415 }
5416 else {
5417 cmd->autoneg = AUTONEG_DISABLE;
5418 }
5419
5420 if (netif_carrier_ok(dev)) {
5421 cmd->speed = bp->line_speed;
5422 cmd->duplex = bp->duplex;
5423 }
5424 else {
5425 cmd->speed = -1;
5426 cmd->duplex = -1;
5427 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005428 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005429
5430 cmd->transceiver = XCVR_INTERNAL;
5431 cmd->phy_address = bp->phy_addr;
5432
5433 return 0;
5434}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005435
Michael Chanb6016b72005-05-26 13:03:09 -07005436static int
5437bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5438{
Michael Chan972ec0d2006-01-23 16:12:43 -08005439 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005440 u8 autoneg = bp->autoneg;
5441 u8 req_duplex = bp->req_duplex;
5442 u16 req_line_speed = bp->req_line_speed;
5443 u32 advertising = bp->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005444 int err = -EINVAL;
5445
5446 spin_lock_bh(&bp->phy_lock);
5447
5448 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5449 goto err_out_unlock;
5450
5451 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5452 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005453
5454 if (cmd->autoneg == AUTONEG_ENABLE) {
5455 autoneg |= AUTONEG_SPEED;
5456
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005457 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005458
5459		/* allow advertising a single speed only */
5460 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5461 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5462 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5463 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5464
Michael Chan7b6b8342007-07-07 22:50:15 -07005465 if (cmd->port == PORT_FIBRE)
5466 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005467
5468 advertising = cmd->advertising;
5469
Michael Chan27a005b2007-05-03 13:23:41 -07005470 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
Michael Chan7b6b8342007-07-07 22:50:15 -07005471 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5472 (cmd->port == PORT_TP))
5473 goto err_out_unlock;
5474 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07005475 advertising = cmd->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005476 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5477 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005478 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005479 if (cmd->port == PORT_FIBRE)
Michael Chanb6016b72005-05-26 13:03:09 -07005480 advertising = ETHTOOL_ALL_FIBRE_SPEED;
Michael Chan7b6b8342007-07-07 22:50:15 -07005481 else
Michael Chanb6016b72005-05-26 13:03:09 -07005482 advertising = ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005483 }
5484 advertising |= ADVERTISED_Autoneg;
5485 }
5486 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005487 if (cmd->port == PORT_FIBRE) {
Michael Chan80be4432006-11-19 14:07:28 -08005488 if ((cmd->speed != SPEED_1000 &&
5489 cmd->speed != SPEED_2500) ||
5490 (cmd->duplex != DUPLEX_FULL))
Michael Chan7b6b8342007-07-07 22:50:15 -07005491 goto err_out_unlock;
Michael Chan80be4432006-11-19 14:07:28 -08005492
5493 if (cmd->speed == SPEED_2500 &&
5494 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
Michael Chan7b6b8342007-07-07 22:50:15 -07005495 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005496 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005497 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5498 goto err_out_unlock;
5499
Michael Chanb6016b72005-05-26 13:03:09 -07005500 autoneg &= ~AUTONEG_SPEED;
5501 req_line_speed = cmd->speed;
5502 req_duplex = cmd->duplex;
5503 advertising = 0;
5504 }
5505
5506 bp->autoneg = autoneg;
5507 bp->advertising = advertising;
5508 bp->req_line_speed = req_line_speed;
5509 bp->req_duplex = req_duplex;
5510
Michael Chan7b6b8342007-07-07 22:50:15 -07005511 err = bnx2_setup_phy(bp, cmd->port);
Michael Chanb6016b72005-05-26 13:03:09 -07005512
Michael Chan7b6b8342007-07-07 22:50:15 -07005513err_out_unlock:
Michael Chanc770a652005-08-25 15:38:39 -07005514 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005515
Michael Chan7b6b8342007-07-07 22:50:15 -07005516 return err;
Michael Chanb6016b72005-05-26 13:03:09 -07005517}
5518
5519static void
5520bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5521{
Michael Chan972ec0d2006-01-23 16:12:43 -08005522 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005523
5524 strcpy(info->driver, DRV_MODULE_NAME);
5525 strcpy(info->version, DRV_MODULE_VERSION);
5526 strcpy(info->bus_info, pci_name(bp->pdev));
Michael Chan58fc2ea2007-07-07 22:52:02 -07005527 strcpy(info->fw_version, bp->fw_version);
Michael Chanb6016b72005-05-26 13:03:09 -07005528}
5529
Michael Chan244ac4f2006-03-20 17:48:46 -08005530#define BNX2_REGDUMP_LEN (32 * 1024)
5531
5532static int
5533bnx2_get_regs_len(struct net_device *dev)
5534{
5535 return BNX2_REGDUMP_LEN;
5536}
5537
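/* Dump device registers for ethtool -d.  reg_boundaries[] holds
 * alternating start/end offsets of the readable register ranges; the
 * buffer is zeroed first and only offsets inside those ranges are
 * read, so unreadable holes in the register space show up as zeros.
 */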
5538static void
5539bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5540{
5541 u32 *p = _p, i, offset;
5542 u8 *orig_p = _p;
5543 struct bnx2 *bp = netdev_priv(dev);
5544 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5545 0x0800, 0x0880, 0x0c00, 0x0c10,
5546 0x0c30, 0x0d08, 0x1000, 0x101c,
5547 0x1040, 0x1048, 0x1080, 0x10a4,
5548 0x1400, 0x1490, 0x1498, 0x14f0,
5549 0x1500, 0x155c, 0x1580, 0x15dc,
5550 0x1600, 0x1658, 0x1680, 0x16d8,
5551 0x1800, 0x1820, 0x1840, 0x1854,
5552 0x1880, 0x1894, 0x1900, 0x1984,
5553 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5554 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5555 0x2000, 0x2030, 0x23c0, 0x2400,
5556 0x2800, 0x2820, 0x2830, 0x2850,
5557 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5558 0x3c00, 0x3c94, 0x4000, 0x4010,
5559 0x4080, 0x4090, 0x43c0, 0x4458,
5560 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5561 0x4fc0, 0x5010, 0x53c0, 0x5444,
5562 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5563 0x5fc0, 0x6000, 0x6400, 0x6428,
5564 0x6800, 0x6848, 0x684c, 0x6860,
5565 0x6888, 0x6910, 0x8000 };
5566
5567 regs->version = 0;
5568
5569 memset(p, 0, BNX2_REGDUMP_LEN);
5570
5571 if (!netif_running(bp->dev))
5572 return;
5573
5574 i = 0;
5575 offset = reg_boundaries[0];
5576 p += offset;
5577 while (offset < BNX2_REGDUMP_LEN) {
5578 *p++ = REG_RD(bp, offset);
5579 offset += 4;
5580 if (offset == reg_boundaries[i + 1]) {
5581 offset = reg_boundaries[i + 2];
5582 p = (u32 *) (orig_p + offset);
5583 i += 2;
5584 }
5585 }
5586}
5587
Michael Chanb6016b72005-05-26 13:03:09 -07005588static void
5589bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5590{
Michael Chan972ec0d2006-01-23 16:12:43 -08005591 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005592
5593 if (bp->flags & NO_WOL_FLAG) {
5594 wol->supported = 0;
5595 wol->wolopts = 0;
5596 }
5597 else {
5598 wol->supported = WAKE_MAGIC;
5599 if (bp->wol)
5600 wol->wolopts = WAKE_MAGIC;
5601 else
5602 wol->wolopts = 0;
5603 }
5604 memset(&wol->sopass, 0, sizeof(wol->sopass));
5605}
5606
5607static int
5608bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5609{
Michael Chan972ec0d2006-01-23 16:12:43 -08005610 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005611
5612 if (wol->wolopts & ~WAKE_MAGIC)
5613 return -EINVAL;
5614
5615 if (wol->wolopts & WAKE_MAGIC) {
5616 if (bp->flags & NO_WOL_FLAG)
5617 return -EINVAL;
5618
5619 bp->wol = 1;
5620 }
5621 else {
5622 bp->wol = 0;
5623 }
5624 return 0;
5625}
5626
5627static int
5628bnx2_nway_reset(struct net_device *dev)
5629{
Michael Chan972ec0d2006-01-23 16:12:43 -08005630 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005631 u32 bmcr;
5632
5633 if (!(bp->autoneg & AUTONEG_SPEED)) {
5634 return -EINVAL;
5635 }
5636
Michael Chanc770a652005-08-25 15:38:39 -07005637 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005638
Michael Chan7b6b8342007-07-07 22:50:15 -07005639 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5640 int rc;
5641
5642 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5643 spin_unlock_bh(&bp->phy_lock);
5644 return rc;
5645 }
5646
Michael Chanb6016b72005-05-26 13:03:09 -07005647	/* Force a link-down event that is visible to the link partner */
5648 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005649 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005650 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005651
5652 msleep(20);
5653
Michael Chanc770a652005-08-25 15:38:39 -07005654 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005655
5656 bp->current_interval = SERDES_AN_TIMEOUT;
5657 bp->serdes_an_pending = 1;
5658 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005659 }
5660
Michael Chanca58c3a2007-05-03 13:22:52 -07005661 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005662 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005663 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005664
Michael Chanc770a652005-08-25 15:38:39 -07005665 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005666
5667 return 0;
5668}
5669
5670static int
5671bnx2_get_eeprom_len(struct net_device *dev)
5672{
Michael Chan972ec0d2006-01-23 16:12:43 -08005673 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005674
Michael Chan1122db72006-01-23 16:11:42 -08005675 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005676 return 0;
5677
Michael Chan1122db72006-01-23 16:11:42 -08005678 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005679}
5680
5681static int
5682bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5683 u8 *eebuf)
5684{
Michael Chan972ec0d2006-01-23 16:12:43 -08005685 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005686 int rc;
5687
John W. Linville1064e942005-11-10 12:58:24 -08005688 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005689
5690 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5691
5692 return rc;
5693}
5694
5695static int
5696bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5697 u8 *eebuf)
5698{
Michael Chan972ec0d2006-01-23 16:12:43 -08005699 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005700 int rc;
5701
John W. Linville1064e942005-11-10 12:58:24 -08005702 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005703
5704 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5705
5706 return rc;
5707}
5708
5709static int
5710bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5711{
Michael Chan972ec0d2006-01-23 16:12:43 -08005712 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005713
5714 memset(coal, 0, sizeof(struct ethtool_coalesce));
5715
5716 coal->rx_coalesce_usecs = bp->rx_ticks;
5717 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5718 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5719 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5720
5721 coal->tx_coalesce_usecs = bp->tx_ticks;
5722 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5723 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5724 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5725
5726 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5727
5728 return 0;
5729}
5730
5731static int
5732bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5733{
Michael Chan972ec0d2006-01-23 16:12:43 -08005734 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005735
5736 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5737 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5738
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005739 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005740 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5741
5742 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5743 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5744
5745 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5746 if (bp->rx_quick_cons_trip_int > 0xff)
5747 bp->rx_quick_cons_trip_int = 0xff;
5748
5749 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5750 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5751
5752 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5753 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5754
5755 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5756 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5757
5758 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5759 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5760 0xff;
5761
5762 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07005763 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5764 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5765 bp->stats_ticks = USEC_PER_SEC;
5766 }
Michael Chan7ea69202007-07-16 18:27:10 -07005767 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5768 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5769 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07005770
5771 if (netif_running(bp->dev)) {
5772 bnx2_netif_stop(bp);
5773 bnx2_init_nic(bp);
5774 bnx2_netif_start(bp);
5775 }
5776
5777 return 0;
5778}
5779
5780static void
5781bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5782{
Michael Chan972ec0d2006-01-23 16:12:43 -08005783 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005784
Michael Chan13daffa2006-03-20 17:49:20 -08005785 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005786 ering->rx_mini_max_pending = 0;
5787 ering->rx_jumbo_max_pending = 0;
5788
5789 ering->rx_pending = bp->rx_ring_size;
5790 ering->rx_mini_pending = 0;
5791 ering->rx_jumbo_pending = 0;
5792
5793 ering->tx_max_pending = MAX_TX_DESC_CNT;
5794 ering->tx_pending = bp->tx_ring_size;
5795}
5796
5797static int
5798bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5799{
Michael Chan972ec0d2006-01-23 16:12:43 -08005800 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005801
Michael Chan13daffa2006-03-20 17:49:20 -08005802 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005803 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5804 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5805
5806 return -EINVAL;
5807 }
Michael Chan13daffa2006-03-20 17:49:20 -08005808 if (netif_running(bp->dev)) {
5809 bnx2_netif_stop(bp);
5810 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5811 bnx2_free_skbs(bp);
5812 bnx2_free_mem(bp);
5813 }
5814
5815 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005816 bp->tx_ring_size = ering->tx_pending;
5817
5818 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005819 int rc;
5820
5821 rc = bnx2_alloc_mem(bp);
5822 if (rc)
5823 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005824 bnx2_init_nic(bp);
5825 bnx2_netif_start(bp);
5826 }
5827
5828 return 0;
5829}
5830
5831static void
5832bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5833{
Michael Chan972ec0d2006-01-23 16:12:43 -08005834 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005835
5836 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5837 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5838 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5839}
5840
5841static int
5842bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5843{
Michael Chan972ec0d2006-01-23 16:12:43 -08005844 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005845
5846 bp->req_flow_ctrl = 0;
5847 if (epause->rx_pause)
5848 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5849 if (epause->tx_pause)
5850 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5851
5852 if (epause->autoneg) {
5853 bp->autoneg |= AUTONEG_FLOW_CTRL;
5854 }
5855 else {
5856 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5857 }
5858
Michael Chanc770a652005-08-25 15:38:39 -07005859 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005860
Michael Chan0d8a6572007-07-07 22:49:43 -07005861 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07005862
Michael Chanc770a652005-08-25 15:38:39 -07005863 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005864
5865 return 0;
5866}
5867
5868static u32
5869bnx2_get_rx_csum(struct net_device *dev)
5870{
Michael Chan972ec0d2006-01-23 16:12:43 -08005871 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005872
5873 return bp->rx_csum;
5874}
5875
5876static int
5877bnx2_set_rx_csum(struct net_device *dev, u32 data)
5878{
Michael Chan972ec0d2006-01-23 16:12:43 -08005879 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005880
5881 bp->rx_csum = data;
5882 return 0;
5883}
5884
Michael Chanb11d6212006-06-29 12:31:21 -07005885static int
5886bnx2_set_tso(struct net_device *dev, u32 data)
5887{
Michael Chan4666f872007-05-03 13:22:28 -07005888 struct bnx2 *bp = netdev_priv(dev);
5889
5890 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005891 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005892 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5893 dev->features |= NETIF_F_TSO6;
5894 } else
5895 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5896 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005897 return 0;
5898}
5899
Michael Chancea94db2006-06-12 22:16:13 -07005900#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005901
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005902static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005903 char string[ETH_GSTRING_LEN];
5904} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5905 { "rx_bytes" },
5906 { "rx_error_bytes" },
5907 { "tx_bytes" },
5908 { "tx_error_bytes" },
5909 { "rx_ucast_packets" },
5910 { "rx_mcast_packets" },
5911 { "rx_bcast_packets" },
5912 { "tx_ucast_packets" },
5913 { "tx_mcast_packets" },
5914 { "tx_bcast_packets" },
5915 { "tx_mac_errors" },
5916 { "tx_carrier_errors" },
5917 { "rx_crc_errors" },
5918 { "rx_align_errors" },
5919 { "tx_single_collisions" },
5920 { "tx_multi_collisions" },
5921 { "tx_deferred" },
5922 { "tx_excess_collisions" },
5923 { "tx_late_collisions" },
5924 { "tx_total_collisions" },
5925 { "rx_fragments" },
5926 { "rx_jabbers" },
5927 { "rx_undersize_packets" },
5928 { "rx_oversize_packets" },
5929 { "rx_64_byte_packets" },
5930 { "rx_65_to_127_byte_packets" },
5931 { "rx_128_to_255_byte_packets" },
5932 { "rx_256_to_511_byte_packets" },
5933 { "rx_512_to_1023_byte_packets" },
5934 { "rx_1024_to_1522_byte_packets" },
5935 { "rx_1523_to_9022_byte_packets" },
5936 { "tx_64_byte_packets" },
5937 { "tx_65_to_127_byte_packets" },
5938 { "tx_128_to_255_byte_packets" },
5939 { "tx_256_to_511_byte_packets" },
5940 { "tx_512_to_1023_byte_packets" },
5941 { "tx_1024_to_1522_byte_packets" },
5942 { "tx_1523_to_9022_byte_packets" },
5943 { "rx_xon_frames" },
5944 { "rx_xoff_frames" },
5945 { "tx_xon_frames" },
5946 { "tx_xoff_frames" },
5947 { "rx_mac_ctrl_frames" },
5948 { "rx_filtered_packets" },
5949 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005950 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005951};
5952
5953#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5954
Arjan van de Venf71e1302006-03-03 21:33:57 -05005955static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005956 STATS_OFFSET32(stat_IfHCInOctets_hi),
5957 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5958 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5959 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5960 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5961 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5962 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5963 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5964 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5965 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5966 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005967 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5968 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5969 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5970 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5971 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5972 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5973 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5974 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5975 STATS_OFFSET32(stat_EtherStatsCollisions),
5976 STATS_OFFSET32(stat_EtherStatsFragments),
5977 STATS_OFFSET32(stat_EtherStatsJabbers),
5978 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5979 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5980 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5981 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5982 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5983 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5984 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5985 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5986 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5987 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5988 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5989 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5990 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5991 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5992 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5993 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5994 STATS_OFFSET32(stat_XonPauseFramesReceived),
5995 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5996 STATS_OFFSET32(stat_OutXonSent),
5997 STATS_OFFSET32(stat_OutXoffSent),
5998 STATS_OFFSET32(stat_MacControlFramesReceived),
5999 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6000 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07006001 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07006002};
6003
6004/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6005 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006006 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006007static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006008 8,0,8,8,8,8,8,8,8,8,
6009 4,0,4,4,4,4,4,4,4,4,
6010 4,4,4,4,4,4,4,4,4,4,
6011 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006012 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07006013};
6014
Michael Chan5b0c76a2005-11-04 08:45:49 -08006015static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6016 8,0,8,8,8,8,8,8,8,8,
6017 4,4,4,4,4,4,4,4,4,4,
6018 4,4,4,4,4,4,4,4,4,4,
6019 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006020 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08006021};
6022
Michael Chanb6016b72005-05-26 13:03:09 -07006023#define BNX2_NUM_TESTS 6
6024
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006025static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006026 char string[ETH_GSTRING_LEN];
6027} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6028 { "register_test (offline)" },
6029 { "memory_test (offline)" },
6030 { "loopback_test (offline)" },
6031 { "nvram_test (online)" },
6032 { "interrupt_test (online)" },
6033 { "link_test (online)" },
6034};
6035
6036static int
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006037bnx2_get_sset_count(struct net_device *dev, int sset)
Michael Chanb6016b72005-05-26 13:03:09 -07006038{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006039 switch (sset) {
6040 case ETH_SS_TEST:
6041 return BNX2_NUM_TESTS;
6042 case ETH_SS_STATS:
6043 return BNX2_NUM_STATS;
6044 default:
6045 return -EOPNOTSUPP;
6046 }
Michael Chanb6016b72005-05-26 13:03:09 -07006047}
6048
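/* ethtool self-test entry point.  Offline tests (registers, memory,
 * loopback) require stopping traffic and resetting the chip into
 * diagnostic mode; afterwards the NIC is reinitialized and up to seven
 * seconds are allowed for the link to return.  The NVRAM, interrupt
 * and link tests run online.  Each failing test sets its slot in buf[]
 * and flags ETH_TEST_FL_FAILED.
 */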
6049static void
6050bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6051{
Michael Chan972ec0d2006-01-23 16:12:43 -08006052 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006053
6054 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6055 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006056 int i;
6057
Michael Chanb6016b72005-05-26 13:03:09 -07006058 bnx2_netif_stop(bp);
6059 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6060 bnx2_free_skbs(bp);
6061
6062 if (bnx2_test_registers(bp) != 0) {
6063 buf[0] = 1;
6064 etest->flags |= ETH_TEST_FL_FAILED;
6065 }
6066 if (bnx2_test_memory(bp) != 0) {
6067 buf[1] = 1;
6068 etest->flags |= ETH_TEST_FL_FAILED;
6069 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006070 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006071 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006072
6073 if (!netif_running(bp->dev)) {
6074 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6075 }
6076 else {
6077 bnx2_init_nic(bp);
6078 bnx2_netif_start(bp);
6079 }
6080
6081 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006082 for (i = 0; i < 7; i++) {
6083 if (bp->link_up)
6084 break;
6085 msleep_interruptible(1000);
6086 }
Michael Chanb6016b72005-05-26 13:03:09 -07006087 }
6088
6089 if (bnx2_test_nvram(bp) != 0) {
6090 buf[3] = 1;
6091 etest->flags |= ETH_TEST_FL_FAILED;
6092 }
6093 if (bnx2_test_intr(bp) != 0) {
6094 buf[4] = 1;
6095 etest->flags |= ETH_TEST_FL_FAILED;
6096 }
6097
6098 if (bnx2_test_link(bp) != 0) {
6099 buf[5] = 1;
6100 etest->flags |= ETH_TEST_FL_FAILED;
6101
6102 }
6103}
6104
6105static void
6106bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107{
6108 switch (stringset) {
6109 case ETH_SS_STATS:
6110 memcpy(buf, bnx2_stats_str_arr,
6111 sizeof(bnx2_stats_str_arr));
6112 break;
6113 case ETH_SS_TEST:
6114 memcpy(buf, bnx2_tests_str_arr,
6115 sizeof(bnx2_tests_str_arr));
6116 break;
6117 }
6118}
6119
Michael Chanb6016b72005-05-26 13:03:09 -07006120static void
6121bnx2_get_ethtool_stats(struct net_device *dev,
6122 struct ethtool_stats *stats, u64 *buf)
6123{
Michael Chan972ec0d2006-01-23 16:12:43 -08006124 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006125 int i;
6126 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006127 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006128
6129 if (hw_stats == NULL) {
6130 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6131 return;
6132 }
6133
Michael Chan5b0c76a2005-11-04 08:45:49 -08006134 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6135 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6136 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6137 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006138 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006139 else
6140 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006141
6142 for (i = 0; i < BNX2_NUM_STATS; i++) {
6143 if (stats_len_arr[i] == 0) {
6144 /* skip this counter */
6145 buf[i] = 0;
6146 continue;
6147 }
6148 if (stats_len_arr[i] == 4) {
6149 /* 4-byte counter */
6150 buf[i] = (u64)
6151 *(hw_stats + bnx2_stats_offset_arr[i]);
6152 continue;
6153 }
6154 /* 8-byte counter */
6155 buf[i] = (((u64) *(hw_stats +
6156 bnx2_stats_offset_arr[i])) << 32) +
6157 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6158 }
6159}
6160
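/* ethtool port-identify: blink the port LED for 'data' seconds (2 if
 * unspecified) by toggling the EMAC LED override bits every 500 ms,
 * then restore the saved BNX2_MISC_CFG LED mode.
 */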
6161static int
6162bnx2_phys_id(struct net_device *dev, u32 data)
6163{
Michael Chan972ec0d2006-01-23 16:12:43 -08006164 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006165 int i;
6166 u32 save;
6167
6168 if (data == 0)
6169 data = 2;
6170
6171 save = REG_RD(bp, BNX2_MISC_CFG);
6172 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6173
6174 for (i = 0; i < (data * 2); i++) {
6175 if ((i % 2) == 0) {
6176 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6177 }
6178 else {
6179 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6180 BNX2_EMAC_LED_1000MB_OVERRIDE |
6181 BNX2_EMAC_LED_100MB_OVERRIDE |
6182 BNX2_EMAC_LED_10MB_OVERRIDE |
6183 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6184 BNX2_EMAC_LED_TRAFFIC);
6185 }
6186 msleep_interruptible(500);
6187 if (signal_pending(current))
6188 break;
6189 }
6190 REG_WR(bp, BNX2_EMAC_LED, 0);
6191 REG_WR(bp, BNX2_MISC_CFG, save);
6192 return 0;
6193}
6194
Michael Chan4666f872007-05-03 13:22:28 -07006195static int
6196bnx2_set_tx_csum(struct net_device *dev, u32 data)
6197{
6198 struct bnx2 *bp = netdev_priv(dev);
6199
6200 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chan6460d942007-07-14 19:07:52 -07006201 return (ethtool_op_set_tx_ipv6_csum(dev, data));
Michael Chan4666f872007-05-03 13:22:28 -07006202 else
6203 return (ethtool_op_set_tx_csum(dev, data));
6204}
6205
Jeff Garzik7282d492006-09-13 14:30:00 -04006206static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006207 .get_settings = bnx2_get_settings,
6208 .set_settings = bnx2_set_settings,
6209 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006210 .get_regs_len = bnx2_get_regs_len,
6211 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006212 .get_wol = bnx2_get_wol,
6213 .set_wol = bnx2_set_wol,
6214 .nway_reset = bnx2_nway_reset,
6215 .get_link = ethtool_op_get_link,
6216 .get_eeprom_len = bnx2_get_eeprom_len,
6217 .get_eeprom = bnx2_get_eeprom,
6218 .set_eeprom = bnx2_set_eeprom,
6219 .get_coalesce = bnx2_get_coalesce,
6220 .set_coalesce = bnx2_set_coalesce,
6221 .get_ringparam = bnx2_get_ringparam,
6222 .set_ringparam = bnx2_set_ringparam,
6223 .get_pauseparam = bnx2_get_pauseparam,
6224 .set_pauseparam = bnx2_set_pauseparam,
6225 .get_rx_csum = bnx2_get_rx_csum,
6226 .set_rx_csum = bnx2_set_rx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006227 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006228 .set_sg = ethtool_op_set_sg,
Michael Chanb11d6212006-06-29 12:31:21 -07006229 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006230 .self_test = bnx2_self_test,
6231 .get_strings = bnx2_get_strings,
6232 .phys_id = bnx2_phys_id,
Michael Chanb6016b72005-05-26 13:03:09 -07006233 .get_ethtool_stats = bnx2_get_ethtool_stats,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006234 .get_sset_count = bnx2_get_sset_count,
Michael Chanb6016b72005-05-26 13:03:09 -07006235};
6236
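/* MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Direct PHY register
 * access is refused when the PHY is managed by remote firmware
 * (REMOTE_PHY_CAP_FLAG) or while the interface is down, and all accesses
 * are serialized with the PHY lock.
 */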
6237/* Called with rtnl_lock */
6238static int
6239bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6240{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006241 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006242 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006243 int err;
6244
6245 switch(cmd) {
6246 case SIOCGMIIPHY:
6247 data->phy_id = bp->phy_addr;
6248
6249 /* fallthru */
6250 case SIOCGMIIREG: {
6251 u32 mii_regval;
6252
Michael Chan7b6b8342007-07-07 22:50:15 -07006253 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6254 return -EOPNOTSUPP;
6255
Michael Chandad3e452007-05-03 13:18:03 -07006256 if (!netif_running(dev))
6257 return -EAGAIN;
6258
Michael Chanc770a652005-08-25 15:38:39 -07006259 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006260 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006261 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006262
6263 data->val_out = mii_regval;
6264
6265 return err;
6266 }
6267
6268 case SIOCSMIIREG:
6269 if (!capable(CAP_NET_ADMIN))
6270 return -EPERM;
6271
Michael Chan7b6b8342007-07-07 22:50:15 -07006272 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6273 return -EOPNOTSUPP;
6274
Michael Chandad3e452007-05-03 13:18:03 -07006275 if (!netif_running(dev))
6276 return -EAGAIN;
6277
Michael Chanc770a652005-08-25 15:38:39 -07006278 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006279 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006280 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006281
6282 return err;
6283
6284 default:
6285 /* do nothing */
6286 break;
6287 }
6288 return -EOPNOTSUPP;
6289}
6290
6291/* Called with rtnl_lock */
6292static int
6293bnx2_change_mac_addr(struct net_device *dev, void *p)
6294{
6295 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006296 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006297
Michael Chan73eef4c2005-08-25 15:39:15 -07006298 if (!is_valid_ether_addr(addr->sa_data))
6299 return -EINVAL;
6300
Michael Chanb6016b72005-05-26 13:03:09 -07006301 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6302 if (netif_running(dev))
6303 bnx2_set_mac_addr(bp);
6304
6305 return 0;
6306}
6307
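/* Accept any MTU between the minimum Ethernet frame size and the jumbo
 * limit (both checked with the Ethernet header included); if the interface
 * is up, the NIC is stopped, re-initialized for the new size and restarted.
 */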
6308/* Called with rtnl_lock */
6309static int
6310bnx2_change_mtu(struct net_device *dev, int new_mtu)
6311{
Michael Chan972ec0d2006-01-23 16:12:43 -08006312 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006313
6314 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6315 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6316 return -EINVAL;
6317
6318 dev->mtu = new_mtu;
6319 if (netif_running(dev)) {
6320 bnx2_netif_stop(bp);
6321
6322 bnx2_init_nic(bp);
6323
6324 bnx2_netif_start(bp);
6325 }
6326 return 0;
6327}
6328
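/* netpoll/netconsole entry point: run the interrupt handler directly with
 * the device IRQ disabled, for contexts where normal interrupt delivery
 * cannot be relied upon.
 */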
6329#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6330static void
6331poll_bnx2(struct net_device *dev)
6332{
Michael Chan972ec0d2006-01-23 16:12:43 -08006333 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006334
6335 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006336 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006337 enable_irq(bp->pdev->irq);
6338}
6339#endif
6340
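/* Determine whether a 5709 port is copper or SerDes.  The bond ID field of
 * the dual media control register identifies single-media parts directly;
 * otherwise the strap value (or its software override) is decoded, and the
 * strap-to-media mapping differs between PCI function 0 and function 1.
 */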
Michael Chan253c8b72007-01-08 19:56:01 -08006341static void __devinit
6342bnx2_get_5709_media(struct bnx2 *bp)
6343{
6344 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6345 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6346 u32 strap;
6347
6348 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6349 return;
6350 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6351 bp->phy_flags |= PHY_SERDES_FLAG;
6352 return;
6353 }
6354
6355 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6356 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6357 else
6358 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6359
6360 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6361 switch (strap) {
6362 case 0x4:
6363 case 0x5:
6364 case 0x6:
6365 bp->phy_flags |= PHY_SERDES_FLAG;
6366 return;
6367 }
6368 } else {
6369 switch (strap) {
6370 case 0x1:
6371 case 0x2:
6372 case 0x4:
6373 bp->phy_flags |= PHY_SERDES_FLAG;
6374 return;
6375 }
6376 }
6377}
6378
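/* Record the bus type and speed of the slot the chip sits in.  On PCI-X the
 * detected clock range is mapped to a nominal frequency; on conventional
 * PCI the M66EN status bit selects 66 or 33 MHz.  A 32-bit bus is flagged
 * separately.
 */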
Michael Chan883e5152007-05-03 13:25:11 -07006379static void __devinit
6380bnx2_get_pci_speed(struct bnx2 *bp)
6381{
6382 u32 reg;
6383
6384 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6385 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6386 u32 clkreg;
6387
6388 bp->flags |= PCIX_FLAG;
6389
6390 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6391
6392 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6393 switch (clkreg) {
6394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6395 bp->bus_speed_mhz = 133;
6396 break;
6397
6398 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6399 bp->bus_speed_mhz = 100;
6400 break;
6401
6402 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6403 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6404 bp->bus_speed_mhz = 66;
6405 break;
6406
6407 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6408 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6409 bp->bus_speed_mhz = 50;
6410 break;
6411
6412 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6413 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6414 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6415 bp->bus_speed_mhz = 33;
6416 break;
6417 }
6418 }
6419 else {
6420 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6421 bp->bus_speed_mhz = 66;
6422 else
6423 bp->bus_speed_mhz = 33;
6424 }
6425
6426 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6427 bp->flags |= PCI_32BIT_FLAG;
6428
6429}
6430
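/* One-time board setup at probe time: enable and map the PCI device, read
 * the chip ID and shared memory signature, extract the permanent MAC
 * address and firmware version, detect the PHY type, and establish default
 * ring sizes, coalescing parameters and link settings.  Called from
 * bnx2_init_one() before the net device is registered.
 */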
Michael Chanb6016b72005-05-26 13:03:09 -07006431static int __devinit
6432bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6433{
6434 struct bnx2 *bp;
6435 unsigned long mem_len;
Michael Chan58fc2ea2007-07-07 22:52:02 -07006436 int rc, i, j;
Michael Chanb6016b72005-05-26 13:03:09 -07006437 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006438 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006439
Michael Chanb6016b72005-05-26 13:03:09 -07006440 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006441 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006442
6443 bp->flags = 0;
6444 bp->phy_flags = 0;
6445
6446 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6447 rc = pci_enable_device(pdev);
6448 if (rc) {
Joe Perches898eb712007-10-18 03:06:30 -07006449 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006450 goto err_out;
6451 }
6452
6453 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006454 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006455 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006456 rc = -ENODEV;
6457 goto err_out_disable;
6458 }
6459
6460 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6461 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006462 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006463 goto err_out_disable;
6464 }
6465
6466 pci_set_master(pdev);
6467
6468 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6469 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006470 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006471 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006472 rc = -EIO;
6473 goto err_out_release;
6474 }
6475
Michael Chanb6016b72005-05-26 13:03:09 -07006476 bp->dev = dev;
6477 bp->pdev = pdev;
6478
6479 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006480 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006481 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006482
6483 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006484 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006485 dev->mem_end = dev->mem_start + mem_len;
6486 dev->irq = pdev->irq;
6487
6488 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6489
6490 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006491 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006492 rc = -ENOMEM;
6493 goto err_out_release;
6494 }
6495
6496 /* Configure byte swap and enable write to the reg_window registers.
6497	 * Rely on the CPU to do target byte swapping on big endian systems.
6498	 * The chip's target access swapping will not swap all accesses.
6499 */
6500 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6501 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6502 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6503
Pavel Machek829ca9a2005-09-03 15:56:56 -07006504 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006505
6506 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6507
Michael Chan883e5152007-05-03 13:25:11 -07006508 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6509 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6510 dev_err(&pdev->dev,
6511 "Cannot find PCIE capability, aborting.\n");
6512 rc = -EIO;
6513 goto err_out_unmap;
6514 }
6515 bp->flags |= PCIE_FLAG;
6516 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006517 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6518 if (bp->pcix_cap == 0) {
6519 dev_err(&pdev->dev,
6520 "Cannot find PCIX capability, aborting.\n");
6521 rc = -EIO;
6522 goto err_out_unmap;
6523 }
6524 }
6525
Michael Chan8e6a72c2007-05-03 13:24:48 -07006526 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6527 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6528 bp->flags |= MSI_CAP_FLAG;
6529 }
6530
Michael Chan40453c82007-05-03 13:19:18 -07006531 /* 5708 cannot support DMA addresses > 40-bit. */
6532 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6533 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6534 else
6535 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6536
6537 /* Configure DMA attributes. */
6538 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6539 dev->features |= NETIF_F_HIGHDMA;
6540 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6541 if (rc) {
6542 dev_err(&pdev->dev,
6543 "pci_set_consistent_dma_mask failed, aborting.\n");
6544 goto err_out_unmap;
6545 }
6546 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6547 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6548 goto err_out_unmap;
6549 }
6550
Michael Chan883e5152007-05-03 13:25:11 -07006551 if (!(bp->flags & PCIE_FLAG))
6552 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006553
6554 /* 5706A0 may falsely detect SERR and PERR. */
6555 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6556 reg = REG_RD(bp, PCI_COMMAND);
6557 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6558 REG_WR(bp, PCI_COMMAND, reg);
6559 }
6560 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6561 !(bp->flags & PCIX_FLAG)) {
6562
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006563 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006564 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006565		goto err_out_unmap;
6566 }
6567
6568 bnx2_init_nvram(bp);
6569
Michael Chane3648b32005-11-04 08:51:21 -08006570 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6571
6572 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006573 BNX2_SHM_HDR_SIGNATURE_SIG) {
6574 u32 off = PCI_FUNC(pdev->devfn) << 2;
6575
6576 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6577 } else
Michael Chane3648b32005-11-04 08:51:21 -08006578 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6579
Michael Chanb6016b72005-05-26 13:03:09 -07006580 /* Get the permanent MAC address. First we need to make sure the
6581 * firmware is actually running.
6582 */
Michael Chane3648b32005-11-04 08:51:21 -08006583 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006584
6585 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6586 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006587 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006588 rc = -ENODEV;
6589 goto err_out_unmap;
6590 }
6591
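	/* The bootcode version is stored as three packed bytes in shared
	 * memory; format it as a dotted decimal string in bp->fw_version,
	 * suppressing leading zeros.  If management firmware is running,
	 * its version string is appended further below.
	 */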
Michael Chan58fc2ea2007-07-07 22:52:02 -07006592 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6593 for (i = 0, j = 0; i < 3; i++) {
6594 u8 num, k, skip0;
6595
6596 num = (u8) (reg >> (24 - (i * 8)));
6597 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6598 if (num >= k || !skip0 || k == 1) {
6599 bp->fw_version[j++] = (num / k) + '0';
6600 skip0 = 0;
6601 }
6602 }
6603 if (i != 2)
6604 bp->fw_version[j++] = '.';
6605 }
Michael Chan846f5c62007-10-10 16:16:51 -07006606 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6607 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6608 bp->wol = 1;
6609
6610 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
Michael Chanc2d3db82007-07-16 18:26:43 -07006611 bp->flags |= ASF_ENABLE_FLAG;
6612
6613 for (i = 0; i < 30; i++) {
6614 reg = REG_RD_IND(bp, bp->shmem_base +
6615 BNX2_BC_STATE_CONDITION);
6616 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6617 break;
6618 msleep(10);
6619 }
6620 }
Michael Chan58fc2ea2007-07-07 22:52:02 -07006621 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6622 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6623 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6624 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6625 int i;
6626 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6627
6628 bp->fw_version[j++] = ' ';
6629 for (i = 0; i < 3; i++) {
6630 reg = REG_RD_IND(bp, addr + i * 4);
6631 reg = swab32(reg);
6632 memcpy(&bp->fw_version[j], &reg, 4);
6633 j += 4;
6634 }
6635 }
Michael Chanb6016b72005-05-26 13:03:09 -07006636
Michael Chane3648b32005-11-04 08:51:21 -08006637 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006638 bp->mac_addr[0] = (u8) (reg >> 8);
6639 bp->mac_addr[1] = (u8) reg;
6640
Michael Chane3648b32005-11-04 08:51:21 -08006641 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006642 bp->mac_addr[2] = (u8) (reg >> 24);
6643 bp->mac_addr[3] = (u8) (reg >> 16);
6644 bp->mac_addr[4] = (u8) (reg >> 8);
6645 bp->mac_addr[5] = (u8) reg;
6646
6647 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006648 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006649
6650 bp->rx_csum = 1;
6651
6652 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6653
6654 bp->tx_quick_cons_trip_int = 20;
6655 bp->tx_quick_cons_trip = 20;
6656 bp->tx_ticks_int = 80;
6657 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006658
Michael Chanb6016b72005-05-26 13:03:09 -07006659 bp->rx_quick_cons_trip_int = 6;
6660 bp->rx_quick_cons_trip = 6;
6661 bp->rx_ticks_int = 18;
6662 bp->rx_ticks = 18;
6663
Michael Chan7ea69202007-07-16 18:27:10 -07006664 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006665
6666 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006667 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006668
Michael Chan5b0c76a2005-11-04 08:45:49 -08006669 bp->phy_addr = 1;
6670
Michael Chanb6016b72005-05-26 13:03:09 -07006671 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006672 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6673 bnx2_get_5709_media(bp);
6674 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006675 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006676
Michael Chan0d8a6572007-07-07 22:49:43 -07006677 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006678 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006679 bp->phy_port = PORT_FIBRE;
Michael Chan846f5c62007-10-10 16:16:51 -07006680 reg = REG_RD_IND(bp, bp->shmem_base +
6681 BNX2_SHARED_HW_CFG_CONFIG);
6682 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6683 bp->flags |= NO_WOL_FLAG;
6684 bp->wol = 0;
6685 }
Michael Chanbac0dff2006-11-19 14:15:05 -08006686 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006687 bp->phy_addr = 2;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006688 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6689 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6690 }
Michael Chan0d8a6572007-07-07 22:49:43 -07006691 bnx2_init_remote_phy(bp);
6692
Michael Chan261dd5c2007-01-08 19:55:46 -08006693 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6694 CHIP_NUM(bp) == CHIP_NUM_5708)
6695 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanfb0c18b2007-12-10 17:18:23 -08006696 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6697 (CHIP_REV(bp) == CHIP_REV_Ax ||
6698 CHIP_REV(bp) == CHIP_REV_Bx))
Michael Chanb659f442007-02-02 00:46:35 -08006699 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006700
Michael Chan16088272006-06-12 22:16:43 -07006701 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6702 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
Michael Chan846f5c62007-10-10 16:16:51 -07006703 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chandda1e392006-01-23 16:08:14 -08006704 bp->flags |= NO_WOL_FLAG;
Michael Chan846f5c62007-10-10 16:16:51 -07006705 bp->wol = 0;
6706 }
Michael Chandda1e392006-01-23 16:08:14 -08006707
Michael Chanb6016b72005-05-26 13:03:09 -07006708 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6709 bp->tx_quick_cons_trip_int =
6710 bp->tx_quick_cons_trip;
6711 bp->tx_ticks_int = bp->tx_ticks;
6712 bp->rx_quick_cons_trip_int =
6713 bp->rx_quick_cons_trip;
6714 bp->rx_ticks_int = bp->rx_ticks;
6715 bp->comp_prod_trip_int = bp->comp_prod_trip;
6716 bp->com_ticks_int = bp->com_ticks;
6717 bp->cmd_ticks_int = bp->cmd_ticks;
6718 }
6719
Michael Chanf9317a42006-09-29 17:06:23 -07006720 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6721 *
6722	 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
6723 * with byte enables disabled on the unused 32-bit word. This is legal
6724 * but causes problems on the AMD 8132 which will eventually stop
6725 * responding after a while.
6726 *
6727 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006728 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006729 */
6730 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6731 struct pci_dev *amd_8132 = NULL;
6732
6733 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6734 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6735 amd_8132))) {
Michael Chanf9317a42006-09-29 17:06:23 -07006736
Auke Kok44c10132007-06-08 15:46:36 -07006737 if (amd_8132->revision >= 0x10 &&
6738 amd_8132->revision <= 0x13) {
Michael Chanf9317a42006-09-29 17:06:23 -07006739 disable_msi = 1;
6740 pci_dev_put(amd_8132);
6741 break;
6742 }
6743 }
6744 }
6745
Michael Chandeaf3912007-07-07 22:48:00 -07006746 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006747 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6748
Michael Chancd339a02005-08-25 15:35:24 -07006749 init_timer(&bp->timer);
6750 bp->timer.expires = RUN_AT(bp->timer_interval);
6751 bp->timer.data = (unsigned long) bp;
6752 bp->timer.function = bnx2_timer;
6753
Michael Chanb6016b72005-05-26 13:03:09 -07006754 return 0;
6755
6756err_out_unmap:
6757 if (bp->regview) {
6758 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006759 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006760 }
6761
6762err_out_release:
6763 pci_release_regions(pdev);
6764
6765err_out_disable:
6766 pci_disable_device(pdev);
6767 pci_set_drvdata(pdev, NULL);
6768
6769err_out:
6770 return rc;
6771}
6772
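/* Format a human-readable description of the bus the device is attached to
 * ("PCI Express", "PCI-X 64-bit 133MHz", ...) for the probe-time banner.
 */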
Michael Chan883e5152007-05-03 13:25:11 -07006773static char * __devinit
6774bnx2_bus_string(struct bnx2 *bp, char *str)
6775{
6776 char *s = str;
6777
6778 if (bp->flags & PCIE_FLAG) {
6779 s += sprintf(s, "PCI Express");
6780 } else {
6781 s += sprintf(s, "PCI");
6782 if (bp->flags & PCIX_FLAG)
6783 s += sprintf(s, "-X");
6784 if (bp->flags & PCI_32BIT_FLAG)
6785 s += sprintf(s, " 32-bit");
6786 else
6787 s += sprintf(s, " 64-bit");
6788 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6789 }
6790 return str;
6791}
6792
Michael Chanb6016b72005-05-26 13:03:09 -07006793static int __devinit
6794bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6795{
6796 static int version_printed = 0;
6797 struct net_device *dev = NULL;
6798 struct bnx2 *bp;
Joe Perches0795af52007-10-03 17:59:30 -07006799 int rc;
Michael Chan883e5152007-05-03 13:25:11 -07006800 char str[40];
Joe Perches0795af52007-10-03 17:59:30 -07006801 DECLARE_MAC_BUF(mac);
Michael Chanb6016b72005-05-26 13:03:09 -07006802
6803 if (version_printed++ == 0)
6804 printk(KERN_INFO "%s", version);
6805
6806 /* dev zeroed in init_etherdev */
6807 dev = alloc_etherdev(sizeof(*bp));
6808
6809 if (!dev)
6810 return -ENOMEM;
6811
6812 rc = bnx2_init_board(pdev, dev);
6813 if (rc < 0) {
6814 free_netdev(dev);
6815 return rc;
6816 }
6817
6818 dev->open = bnx2_open;
6819 dev->hard_start_xmit = bnx2_start_xmit;
6820 dev->stop = bnx2_close;
6821 dev->get_stats = bnx2_get_stats;
6822 dev->set_multicast_list = bnx2_set_rx_mode;
6823 dev->do_ioctl = bnx2_ioctl;
6824 dev->set_mac_address = bnx2_change_mac_addr;
6825 dev->change_mtu = bnx2_change_mtu;
6826 dev->tx_timeout = bnx2_tx_timeout;
6827 dev->watchdog_timeo = TX_TIMEOUT;
6828#ifdef BCM_VLAN
6829 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07006830#endif
Michael Chanb6016b72005-05-26 13:03:09 -07006831 dev->ethtool_ops = &bnx2_ethtool_ops;
Michael Chanb6016b72005-05-26 13:03:09 -07006832
Michael Chan972ec0d2006-01-23 16:12:43 -08006833 bp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006834 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
Michael Chanb6016b72005-05-26 13:03:09 -07006835
6836#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6837 dev->poll_controller = poll_bnx2;
6838#endif
6839
Michael Chan1b2f9222007-05-03 13:20:19 -07006840 pci_set_drvdata(pdev, dev);
6841
6842 memcpy(dev->dev_addr, bp->mac_addr, 6);
6843 memcpy(dev->perm_addr, bp->mac_addr, 6);
6844 bp->name = board_info[ent->driver_data].name;
6845
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006846 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07006847 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006848 dev->features |= NETIF_F_IPV6_CSUM;
6849
Michael Chan1b2f9222007-05-03 13:20:19 -07006850#ifdef BCM_VLAN
6851 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6852#endif
6853 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006854 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6855 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006856
Michael Chanb6016b72005-05-26 13:03:09 -07006857 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006858 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006859 if (bp->regview)
6860 iounmap(bp->regview);
6861 pci_release_regions(pdev);
6862 pci_disable_device(pdev);
6863 pci_set_drvdata(pdev, NULL);
6864 free_netdev(dev);
6865 return rc;
6866 }
6867
Michael Chan883e5152007-05-03 13:25:11 -07006868 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Joe Perches0795af52007-10-03 17:59:30 -07006869 "IRQ %d, node addr %s\n",
Michael Chanb6016b72005-05-26 13:03:09 -07006870 dev->name,
6871 bp->name,
6872 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6873 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07006874 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07006875 dev->base_addr,
Joe Perches0795af52007-10-03 17:59:30 -07006876 bp->pdev->irq, print_mac(mac, dev->dev_addr));
Michael Chanb6016b72005-05-26 13:03:09 -07006877
Michael Chanb6016b72005-05-26 13:03:09 -07006878 return 0;
6879}
6880
6881static void __devexit
6882bnx2_remove_one(struct pci_dev *pdev)
6883{
6884 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006885 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006886
Michael Chanafdc08b2005-08-25 15:34:29 -07006887 flush_scheduled_work();
6888
Michael Chanb6016b72005-05-26 13:03:09 -07006889 unregister_netdev(dev);
6890
6891 if (bp->regview)
6892 iounmap(bp->regview);
6893
6894 free_netdev(dev);
6895 pci_release_regions(pdev);
6896 pci_disable_device(pdev);
6897 pci_set_drvdata(pdev, NULL);
6898}
6899
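/* PM suspend: always save PCI config state, then, if the interface is up,
 * quiesce it, pick a firmware reset code that reflects the wake-on-LAN
 * configuration (link down when WOL is unsupported), reset the chip and
 * drop to the requested low-power state.  bnx2_resume() restores the
 * config space and re-initializes the NIC.
 */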
6900static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006901bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006902{
6903 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006904 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006905 u32 reset_code;
6906
Michael Chan6caebb02007-08-03 20:57:25 -07006907 /* PCI register 4 needs to be saved whether netif_running() or not.
6908 * MSI address and data need to be saved if using MSI and
6909 * netif_running().
6910 */
6911 pci_save_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07006912 if (!netif_running(dev))
6913 return 0;
6914
Michael Chan1d60290f2006-03-20 17:50:08 -08006915 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006916 bnx2_netif_stop(bp);
6917 netif_device_detach(dev);
6918 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006919 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006920 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006921 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006922 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6923 else
6924 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6925 bnx2_reset_chip(bp, reset_code);
6926 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006927 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006928 return 0;
6929}
6930
6931static int
6932bnx2_resume(struct pci_dev *pdev)
6933{
6934 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006935 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006936
Michael Chan6caebb02007-08-03 20:57:25 -07006937 pci_restore_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07006938 if (!netif_running(dev))
6939 return 0;
6940
Pavel Machek829ca9a2005-09-03 15:56:56 -07006941 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006942 netif_device_attach(dev);
6943 bnx2_init_nic(bp);
6944 bnx2_netif_start(bp);
6945 return 0;
6946}
6947
6948static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006949 .name = DRV_MODULE_NAME,
6950 .id_table = bnx2_pci_tbl,
6951 .probe = bnx2_init_one,
6952 .remove = __devexit_p(bnx2_remove_one),
6953 .suspend = bnx2_suspend,
6954 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006955};
6956
6957static int __init bnx2_init(void)
6958{
Jeff Garzik29917622006-08-19 17:48:59 -04006959 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006960}
6961
6962static void __exit bnx2_cleanup(void)
6963{
6964 pci_unregister_driver(&bnx2_pci_driver);
6965}
6966
6967module_init(bnx2_init);
6968module_exit(bnx2_cleanup);
6969
6970
6971