/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.9"
#define DRV_MODULE_RELDATE	"December 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
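
/*
 * Worked example for the availability math above (illustrative note, not
 * part of the original driver): tx_prod and tx_cons are free-running
 * indices, so their difference normally equals the number of in-flight
 * descriptors.  Because the hardware ring uses 256 indices for only 255
 * usable entries, the raw difference can momentarily reach or exceed
 * TX_DESC_CNT; masking with 0xffff and clamping a value of TX_DESC_CNT
 * down to MAX_TX_DESC_CNT keeps the result within the real ring capacity.
 * E.g. with tx_prod = 0x0102 and tx_cons = 0x00ff, diff = 3 and
 * bnx2_tx_avail() reports tx_ring_size - 3 free slots.
 */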

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

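/*
 * Note (added commentary): the two helpers above implement the usual
 * "register window" pattern for reaching chip addresses that are not
 * directly BAR-mapped.  The target address is written to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and the data is then read or written
 * through BNX2_PCICFG_REG_WINDOW.  Because the address and data phases are
 * two separate accesses, indirect_lock must be held across the pair so
 * concurrent callers cannot interleave and use a stale window address.
 */
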
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
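
/*
 * Note (added commentary): bnx2_read_phy()/bnx2_write_phy() build an MDIO
 * transaction word in BNX2_EMAC_MDIO_COMM exactly as coded above:
 * (bp->phy_addr << 21) | (reg << 16), the read/write command bits, and for
 * writes the 16-bit data in the low bits (BNX2_EMAC_MDIO_COMM_DATA).
 * Setting BNX2_EMAC_MDIO_COMM_START_BUSY starts the transaction and the
 * same bit is polled (up to 50 x 10us here) until the controller clears
 * it.  When the PHY is normally auto-polled for link status, auto-polling
 * is disabled around the manual access and re-enabled afterwards.
 */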

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}
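
/*
 * Note (added commentary): intr_sem acts as a disable count for the device
 * interrupt.  bnx2_netif_stop() increments it (via bnx2_disable_int_sync())
 * before masking the IRQ and waiting for any handler already running to
 * finish; bnx2_netif_start() only re-enables the tx queue, NAPI and the
 * hardware interrupt when its atomic_dec_and_test() brings the count back
 * to zero, so nested stop/start pairs compose safely.
 */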

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		if (bp->rx_pg_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_pg_desc_ring[i],
					    bp->rx_pg_desc_mapping[i]);
		bp->rx_pg_desc_ring[i] = NULL;
	}
	if (bp->rx_pg_ring)
		vfree(bp->rx_pg_ring);
	bp->rx_pg_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
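
/*
 * Layout of the combined DMA buffer created in bnx2_alloc_mem()
 * (added commentary):
 *
 *   status_blk_mapping -> +-----------------------------------+
 *                         | struct status_block               |
 *                         | (padded to L1_CACHE_ALIGN)        |
 *   stats_blk_mapping  -> +-----------------------------------+
 *                         | struct statistics_block           |
 *                         +-----------------------------------+
 *
 * Keeping both blocks in one coherent allocation saves a DMA allocation
 * and lets the statistics block address be derived by offsetting the
 * status block address by the cache-aligned status_blk_size.
 */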

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
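
/*
 * Added summary of the resolution implemented above (per Table 28B-3 of
 * IEEE 802.3ab-1999, after mapping the 1000Base-X pause bits onto the
 * PAUSE_CAP/PAUSE_ASYM encoding for SerDes):
 *
 *   local CAP  local ASYM  remote CAP  remote ASYM  =>  bp->flow_ctrl
 *       1          x           1           x            TX | RX
 *       1          1           0           1            RX only
 *       0          1           1           1            TX only
 *       otherwise                                        none
 */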

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
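
/*
 * Note (added commentary): bnx2_setup_serdes_phy() has two paths.  With a
 * forced speed (AUTONEG_SPEED clear) it programs BMCR directly, briefly
 * restarting autoneg first when the link is up so the partner sees the
 * link drop.  With autoneg enabled it rewrites the 1000Base-X
 * advertisement, restarts autoneg, and arms a short SERDES_AN_TIMEOUT
 * timer so link-up is not delayed when the partner does not autonegotiate
 * (see the in-line comment above).
 */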
1436
1437#define ETHTOOL_ALL_FIBRE_SPEED \
Michael Chandeaf3912007-07-07 22:48:00 -07001438 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1439 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1440 (ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07001441
1442#define ETHTOOL_ALL_COPPER_SPEED \
1443 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1444 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1445 ADVERTISED_1000baseT_Full)
1446
1447#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1448 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001449
Michael Chanb6016b72005-05-26 13:03:09 -07001450#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
Michael Chandeaf3912007-07-07 22:48:00 -07001452static void
Michael Chan0d8a6572007-07-07 22:49:43 -07001453bnx2_set_default_remote_link(struct bnx2 *bp)
1454{
1455 u32 link;
1456
1457 if (bp->phy_port == PORT_TP)
1458 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1459 else
1460 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1461
1462 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1463 bp->req_line_speed = 0;
1464 bp->autoneg |= AUTONEG_SPEED;
1465 bp->advertising = ADVERTISED_Autoneg;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1467 bp->advertising |= ADVERTISED_10baseT_Half;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1469 bp->advertising |= ADVERTISED_10baseT_Full;
1470 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1471 bp->advertising |= ADVERTISED_100baseT_Half;
1472 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1473 bp->advertising |= ADVERTISED_100baseT_Full;
1474 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1475 bp->advertising |= ADVERTISED_1000baseT_Full;
1476 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1477 bp->advertising |= ADVERTISED_2500baseX_Full;
1478 } else {
1479 bp->autoneg = 0;
1480 bp->advertising = 0;
1481 bp->req_duplex = DUPLEX_FULL;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1483 bp->req_line_speed = SPEED_10;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1485 bp->req_duplex = DUPLEX_HALF;
1486 }
1487 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1488 bp->req_line_speed = SPEED_100;
1489 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1490 bp->req_duplex = DUPLEX_HALF;
1491 }
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1493 bp->req_line_speed = SPEED_1000;
1494 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1495 bp->req_line_speed = SPEED_2500;
1496 }
1497}
1498
1499static void
Michael Chandeaf3912007-07-07 22:48:00 -07001500bnx2_set_default_link(struct bnx2 *bp)
1501{
Michael Chan0d8a6572007-07-07 22:49:43 -07001502 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503 return bnx2_set_default_remote_link(bp);
1504
Michael Chandeaf3912007-07-07 22:48:00 -07001505 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506 bp->req_line_speed = 0;
1507 if (bp->phy_flags & PHY_SERDES_FLAG) {
1508 u32 reg;
1509
1510 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1511
1512 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1515 bp->autoneg = 0;
1516 bp->req_line_speed = bp->line_speed = SPEED_1000;
1517 bp->req_duplex = DUPLEX_FULL;
1518 }
1519 } else
1520 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1521}
1522
Michael Chan0d8a6572007-07-07 22:49:43 -07001523static void
Michael Chandf149d72007-07-07 22:51:36 -07001524bnx2_send_heart_beat(struct bnx2 *bp)
1525{
1526 u32 msg;
1527 u32 addr;
1528
1529 spin_lock(&bp->indirect_lock);
1530 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534 spin_unlock(&bp->indirect_lock);
1535}
1536
1537static void
Michael Chan0d8a6572007-07-07 22:49:43 -07001538bnx2_remote_phy_event(struct bnx2 *bp)
1539{
1540 u32 msg;
1541 u8 link_up = bp->link_up;
1542 u8 old_port;
1543
1544 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1545
Michael Chandf149d72007-07-07 22:51:36 -07001546 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1547 bnx2_send_heart_beat(bp);
1548
1549 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1550
Michael Chan0d8a6572007-07-07 22:49:43 -07001551 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1552 bp->link_up = 0;
1553 else {
1554 u32 speed;
1555
1556 bp->link_up = 1;
1557 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1558 bp->duplex = DUPLEX_FULL;
1559 switch (speed) {
1560 case BNX2_LINK_STATUS_10HALF:
1561 bp->duplex = DUPLEX_HALF;
1562 case BNX2_LINK_STATUS_10FULL:
1563 bp->line_speed = SPEED_10;
1564 break;
1565 case BNX2_LINK_STATUS_100HALF:
1566 			bp->duplex = DUPLEX_HALF;	/* fall through */
1567 case BNX2_LINK_STATUS_100BASE_T4:
1568 case BNX2_LINK_STATUS_100FULL:
1569 bp->line_speed = SPEED_100;
1570 break;
1571 case BNX2_LINK_STATUS_1000HALF:
1572 			bp->duplex = DUPLEX_HALF;	/* fall through */
1573 case BNX2_LINK_STATUS_1000FULL:
1574 bp->line_speed = SPEED_1000;
1575 break;
1576 case BNX2_LINK_STATUS_2500HALF:
1577 			bp->duplex = DUPLEX_HALF;	/* fall through */
1578 case BNX2_LINK_STATUS_2500FULL:
1579 bp->line_speed = SPEED_2500;
1580 break;
1581 default:
1582 bp->line_speed = 0;
1583 break;
1584 }
1585
1586 spin_lock(&bp->phy_lock);
1587 bp->flow_ctrl = 0;
1588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1590 if (bp->duplex == DUPLEX_FULL)
1591 bp->flow_ctrl = bp->req_flow_ctrl;
1592 } else {
1593 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1594 bp->flow_ctrl |= FLOW_CTRL_TX;
1595 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1596 bp->flow_ctrl |= FLOW_CTRL_RX;
1597 }
1598
1599 old_port = bp->phy_port;
1600 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1601 bp->phy_port = PORT_FIBRE;
1602 else
1603 bp->phy_port = PORT_TP;
1604
1605 if (old_port != bp->phy_port)
1606 bnx2_set_default_link(bp);
1607
1608 spin_unlock(&bp->phy_lock);
1609 }
1610 if (bp->link_up != link_up)
1611 bnx2_report_link(bp);
1612
1613 bnx2_set_mac_link(bp);
1614}
1615
1616static int
1617bnx2_set_remote_link(struct bnx2 *bp)
1618{
1619 u32 evt_code;
1620
1621 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622 switch (evt_code) {
1623 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624 bnx2_remote_phy_event(bp);
1625 break;
1626 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627 default:
Michael Chandf149d72007-07-07 22:51:36 -07001628 bnx2_send_heart_beat(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07001629 break;
1630 }
1631 return 0;
1632}
1633
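/* Apply the requested link settings to a copper PHY (summary note).  With
 * autoneg enabled, the advertisement registers are rebuilt from
 * bp->advertising and autoneg is restarted only if something actually
 * changed.  Otherwise speed/duplex are forced through BMCR; if the link is
 * currently up it is briefly forced down (via BMCR_LOOPBACK) before the new
 * BMCR value is written, and the MAC is reprogrammed in case the link does
 * not bounce.
 */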
Michael Chanb6016b72005-05-26 13:03:09 -07001634static int
1635bnx2_setup_copper_phy(struct bnx2 *bp)
1636{
1637 u32 bmcr;
1638 u32 new_bmcr;
1639
Michael Chanca58c3a2007-05-03 13:22:52 -07001640 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001641
1642 if (bp->autoneg & AUTONEG_SPEED) {
1643 u32 adv_reg, adv1000_reg;
1644 u32 new_adv_reg = 0;
1645 u32 new_adv1000_reg = 0;
1646
Michael Chanca58c3a2007-05-03 13:22:52 -07001647 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001648 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1649 ADVERTISE_PAUSE_ASYM);
1650
1651 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1652 adv1000_reg &= PHY_ALL_1000_SPEED;
1653
1654 if (bp->advertising & ADVERTISED_10baseT_Half)
1655 new_adv_reg |= ADVERTISE_10HALF;
1656 if (bp->advertising & ADVERTISED_10baseT_Full)
1657 new_adv_reg |= ADVERTISE_10FULL;
1658 if (bp->advertising & ADVERTISED_100baseT_Half)
1659 new_adv_reg |= ADVERTISE_100HALF;
1660 if (bp->advertising & ADVERTISED_100baseT_Full)
1661 new_adv_reg |= ADVERTISE_100FULL;
1662 if (bp->advertising & ADVERTISED_1000baseT_Full)
1663 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001664
Michael Chanb6016b72005-05-26 13:03:09 -07001665 new_adv_reg |= ADVERTISE_CSMA;
1666
1667 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1668
1669 if ((adv1000_reg != new_adv1000_reg) ||
1670 (adv_reg != new_adv_reg) ||
1671 ((bmcr & BMCR_ANENABLE) == 0)) {
1672
Michael Chanca58c3a2007-05-03 13:22:52 -07001673 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001674 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001675 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001676 BMCR_ANENABLE);
1677 }
1678 else if (bp->link_up) {
1679 /* Flow ctrl may have changed from auto to forced */
1680 			/* Flow ctrl may have changed from auto to forced,
1681 			 * or vice-versa. */
1682 bnx2_resolve_flow_ctrl(bp);
1683 bnx2_set_mac_link(bp);
1684 }
1685 return 0;
1686 }
1687
1688 new_bmcr = 0;
1689 if (bp->req_line_speed == SPEED_100) {
1690 new_bmcr |= BMCR_SPEED100;
1691 }
1692 if (bp->req_duplex == DUPLEX_FULL) {
1693 new_bmcr |= BMCR_FULLDPLX;
1694 }
1695 if (new_bmcr != bmcr) {
1696 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001697
Michael Chanca58c3a2007-05-03 13:22:52 -07001698 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1699 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001700
Michael Chanb6016b72005-05-26 13:03:09 -07001701 if (bmsr & BMSR_LSTATUS) {
1702 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001703 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001704 spin_unlock_bh(&bp->phy_lock);
1705 msleep(50);
1706 spin_lock_bh(&bp->phy_lock);
1707
Michael Chanca58c3a2007-05-03 13:22:52 -07001708 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1709 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001710 }
1711
Michael Chanca58c3a2007-05-03 13:22:52 -07001712 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001713
1714 		/* Normally, the new speed is set up after the link has
1715 		 * gone down and come back up.  In some cases, the link will
1716 		 * not go down, so we need to set up the new speed here.
1717 */
1718 if (bmsr & BMSR_LSTATUS) {
1719 bp->line_speed = bp->req_line_speed;
1720 bp->duplex = bp->req_duplex;
1721 bnx2_resolve_flow_ctrl(bp);
1722 bnx2_set_mac_link(bp);
1723 }
Michael Chan27a005b2007-05-03 13:23:41 -07001724 } else {
1725 bnx2_resolve_flow_ctrl(bp);
1726 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001727 }
1728 return 0;
1729}
1730
1731static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001732bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001733{
1734 if (bp->loopback == MAC_LOOPBACK)
1735 return 0;
1736
1737 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001738 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001739 }
1740 else {
1741 return (bnx2_setup_copper_phy(bp));
1742 }
1743}
1744
1745static int
Michael Chan27a005b2007-05-03 13:23:41 -07001746bnx2_init_5709s_phy(struct bnx2 *bp)
1747{
1748 u32 val;
1749
1750 bp->mii_bmcr = MII_BMCR + 0x10;
1751 bp->mii_bmsr = MII_BMSR + 0x10;
1752 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1753 bp->mii_adv = MII_ADVERTISE + 0x10;
1754 bp->mii_lpa = MII_LPA + 0x10;
1755 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1756
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1758 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1759
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761 bnx2_reset_phy(bp);
1762
1763 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1764
1765 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1766 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1767 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1768 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1769
1770 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1771 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1772 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1773 val |= BCM5708S_UP1_2G5;
1774 else
1775 val &= ~BCM5708S_UP1_2G5;
1776 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1777
1778 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1779 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1780 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1781 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1782
1783 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1784
1785 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1786 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1787 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1788
1789 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1790
1791 return 0;
1792}
1793
1794static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001795bnx2_init_5708s_phy(struct bnx2 *bp)
1796{
1797 u32 val;
1798
Michael Chan27a005b2007-05-03 13:23:41 -07001799 bnx2_reset_phy(bp);
1800
1801 bp->mii_up1 = BCM5708S_UP1;
1802
Michael Chan5b0c76a2005-11-04 08:45:49 -08001803 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1804 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1805 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1806
1807 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1808 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1809 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1810
1811 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1812 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1813 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1814
1815 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1816 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1817 val |= BCM5708S_UP1_2G5;
1818 bnx2_write_phy(bp, BCM5708S_UP1, val);
1819 }
1820
1821 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001822 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1823 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001824 /* increase tx signal amplitude */
1825 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1826 BCM5708S_BLK_ADDR_TX_MISC);
1827 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1828 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1829 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1830 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1831 }
1832
Michael Chane3648b32005-11-04 08:51:21 -08001833 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001834 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1835
1836 if (val) {
1837 u32 is_backplane;
1838
Michael Chane3648b32005-11-04 08:51:21 -08001839 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001840 BNX2_SHARED_HW_CFG_CONFIG);
1841 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1842 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1843 BCM5708S_BLK_ADDR_TX_MISC);
1844 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1845 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1846 BCM5708S_BLK_ADDR_DIG);
1847 }
1848 }
1849 return 0;
1850}
1851
1852static int
1853bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001854{
Michael Chan27a005b2007-05-03 13:23:41 -07001855 bnx2_reset_phy(bp);
1856
Michael Chanb6016b72005-05-26 13:03:09 -07001857 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1858
Michael Chan59b47d82006-11-19 14:10:45 -08001859 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1860 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001861
1862 if (bp->dev->mtu > 1500) {
1863 u32 val;
1864
1865 /* Set extended packet length bit */
1866 bnx2_write_phy(bp, 0x18, 0x7);
1867 bnx2_read_phy(bp, 0x18, &val);
1868 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1869
1870 bnx2_write_phy(bp, 0x1c, 0x6c00);
1871 bnx2_read_phy(bp, 0x1c, &val);
1872 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1873 }
1874 else {
1875 u32 val;
1876
1877 bnx2_write_phy(bp, 0x18, 0x7);
1878 bnx2_read_phy(bp, 0x18, &val);
1879 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1880
1881 bnx2_write_phy(bp, 0x1c, 0x6c00);
1882 bnx2_read_phy(bp, 0x1c, &val);
1883 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1884 }
1885
1886 return 0;
1887}
1888
1889static int
1890bnx2_init_copper_phy(struct bnx2 *bp)
1891{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001892 u32 val;
1893
Michael Chan27a005b2007-05-03 13:23:41 -07001894 bnx2_reset_phy(bp);
1895
Michael Chanb6016b72005-05-26 13:03:09 -07001896 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1897 bnx2_write_phy(bp, 0x18, 0x0c00);
1898 bnx2_write_phy(bp, 0x17, 0x000a);
1899 bnx2_write_phy(bp, 0x15, 0x310b);
1900 bnx2_write_phy(bp, 0x17, 0x201f);
1901 bnx2_write_phy(bp, 0x15, 0x9506);
1902 bnx2_write_phy(bp, 0x17, 0x401f);
1903 bnx2_write_phy(bp, 0x15, 0x14e2);
1904 bnx2_write_phy(bp, 0x18, 0x0400);
1905 }
1906
Michael Chanb659f442007-02-02 00:46:35 -08001907 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1908 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1909 MII_BNX2_DSP_EXPAND_REG | 0x8);
1910 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1911 val &= ~(1 << 8);
1912 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1913 }
1914
Michael Chanb6016b72005-05-26 13:03:09 -07001915 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001916 /* Set extended packet length bit */
1917 bnx2_write_phy(bp, 0x18, 0x7);
1918 bnx2_read_phy(bp, 0x18, &val);
1919 bnx2_write_phy(bp, 0x18, val | 0x4000);
1920
1921 bnx2_read_phy(bp, 0x10, &val);
1922 bnx2_write_phy(bp, 0x10, val | 0x1);
1923 }
1924 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001925 bnx2_write_phy(bp, 0x18, 0x7);
1926 bnx2_read_phy(bp, 0x18, &val);
1927 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1928
1929 bnx2_read_phy(bp, 0x10, &val);
1930 bnx2_write_phy(bp, 0x10, val & ~0x1);
1931 }
1932
Michael Chan5b0c76a2005-11-04 08:45:49 -08001933 /* ethernet@wirespeed */
1934 bnx2_write_phy(bp, 0x18, 0x7007);
1935 bnx2_read_phy(bp, 0x18, &val);
1936 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001937 return 0;
1938}
1939
1940
1941static int
1942bnx2_init_phy(struct bnx2 *bp)
1943{
1944 u32 val;
1945 int rc = 0;
1946
1947 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1948 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1949
Michael Chanca58c3a2007-05-03 13:22:52 -07001950 bp->mii_bmcr = MII_BMCR;
1951 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001952 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001953 bp->mii_adv = MII_ADVERTISE;
1954 bp->mii_lpa = MII_LPA;
1955
Michael Chanb6016b72005-05-26 13:03:09 -07001956 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1957
Michael Chan0d8a6572007-07-07 22:49:43 -07001958 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1959 goto setup_phy;
1960
Michael Chanb6016b72005-05-26 13:03:09 -07001961 bnx2_read_phy(bp, MII_PHYSID1, &val);
1962 bp->phy_id = val << 16;
1963 bnx2_read_phy(bp, MII_PHYSID2, &val);
1964 bp->phy_id |= val & 0xffff;
1965
1966 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001967 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1968 rc = bnx2_init_5706s_phy(bp);
1969 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1970 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001971 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1972 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001973 }
1974 else {
1975 rc = bnx2_init_copper_phy(bp);
1976 }
1977
Michael Chan0d8a6572007-07-07 22:49:43 -07001978setup_phy:
1979 if (!rc)
1980 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001981
1982 return rc;
1983}
1984
1985static int
1986bnx2_set_mac_loopback(struct bnx2 *bp)
1987{
1988 u32 mac_mode;
1989
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994 bp->link_up = 1;
1995 return 0;
1996}
1997
Michael Chanbc5a0692006-01-23 16:13:22 -08001998static int bnx2_test_link(struct bnx2 *);
1999
2000static int
2001bnx2_set_phy_loopback(struct bnx2 *bp)
2002{
2003 u32 mac_mode;
2004 int rc, i;
2005
2006 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07002007 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08002008 BMCR_SPEED1000);
2009 spin_unlock_bh(&bp->phy_lock);
2010 if (rc)
2011 return rc;
2012
2013 for (i = 0; i < 10; i++) {
2014 if (bnx2_test_link(bp) == 0)
2015 break;
Michael Chan80be4432006-11-19 14:07:28 -08002016 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08002017 }
2018
2019 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2020 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2021 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08002022 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08002023
2024 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2025 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2026 bp->link_up = 1;
2027 return 0;
2028}
2029
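/* Driver-to-bootcode handshake (descriptive note).  A sequence number is
 * OR'ed into msg_data (a BNX2_DRV_MSG_CODE and BNX2_DRV_MSG_DATA
 * combination chosen by the caller), the message is posted to the
 * BNX2_DRV_MB mailbox, and the firmware mailbox is polled for a matching
 * ACK for up to FW_ACK_TIME_OUT_MS.  Messages carrying
 * BNX2_DRV_MSG_DATA_WAIT0 return success without further checking; for the
 * rest, a timeout is reported back to the firmware with
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT and -EBUSY is returned, and a non-OK
 * firmware status yields -EIO.
 */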
Michael Chanb6016b72005-05-26 13:03:09 -07002030static int
Michael Chanb090ae22006-01-23 16:07:10 -08002031bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07002032{
2033 int i;
2034 u32 val;
2035
Michael Chanb6016b72005-05-26 13:03:09 -07002036 bp->fw_wr_seq++;
2037 msg_data |= bp->fw_wr_seq;
2038
Michael Chane3648b32005-11-04 08:51:21 -08002039 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002040
2041 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08002042 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2043 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07002044
Michael Chane3648b32005-11-04 08:51:21 -08002045 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07002046
2047 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2048 break;
2049 }
Michael Chanb090ae22006-01-23 16:07:10 -08002050 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2051 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002052
2053 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08002054 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2055 if (!silent)
2056 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2057 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002058
2059 msg_data &= ~BNX2_DRV_MSG_CODE;
2060 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2061
Michael Chane3648b32005-11-04 08:51:21 -08002062 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002063
Michael Chanb6016b72005-05-26 13:03:09 -07002064 return -EBUSY;
2065 }
2066
Michael Chanb090ae22006-01-23 16:07:10 -08002067 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2068 return -EIO;
2069
Michael Chanb6016b72005-05-26 13:03:09 -07002070 return 0;
2071}
2072
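/* The 5709 backs its context memory with host DMA pages.  Enable the
 * context block with the host page size, wait for the MEM_INIT bit to
 * clear, then point each entry of the on-chip host page table at the
 * corresponding ctx_blk_mapping[] page, polling for the WRITE_REQ bit to
 * clear after every entry.
 */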
Michael Chan59b47d82006-11-19 14:10:45 -08002073static int
2074bnx2_init_5709_context(struct bnx2 *bp)
2075{
2076 int i, ret = 0;
2077 u32 val;
2078
2079 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2080 val |= (BCM_PAGE_BITS - 8) << 16;
2081 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002082 for (i = 0; i < 10; i++) {
2083 val = REG_RD(bp, BNX2_CTX_COMMAND);
2084 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2085 break;
2086 udelay(2);
2087 }
2088 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2089 return -EBUSY;
2090
Michael Chan59b47d82006-11-19 14:10:45 -08002091 for (i = 0; i < bp->ctx_pages; i++) {
2092 int j;
2093
2094 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2095 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2096 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2097 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2098 (u64) bp->ctx_blk_mapping[i] >> 32);
2099 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2100 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2101 for (j = 0; j < 10; j++) {
2102
2103 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2104 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2105 break;
2106 udelay(5);
2107 }
2108 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2109 ret = -EBUSY;
2110 break;
2111 }
2112 }
2113 return ret;
2114}
2115
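/* Zero the on-chip context memory for all 96 contexts on the older chips
 * (the 5709 has its own bnx2_init_5709_context() above).  On 5706 A0 some
 * VCIDs map to remapped physical CIDs, hence the new_vcid translation.
 */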
Michael Chanb6016b72005-05-26 13:03:09 -07002116static void
2117bnx2_init_context(struct bnx2 *bp)
2118{
2119 u32 vcid;
2120
2121 vcid = 96;
2122 while (vcid) {
2123 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002124 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002125
2126 vcid--;
2127
2128 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2129 u32 new_vcid;
2130
2131 vcid_addr = GET_PCID_ADDR(vcid);
2132 if (vcid & 0x8) {
2133 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2134 }
2135 else {
2136 new_vcid = vcid;
2137 }
2138 pcid_addr = GET_PCID_ADDR(new_vcid);
2139 }
2140 else {
2141 vcid_addr = GET_CID_ADDR(vcid);
2142 pcid_addr = vcid_addr;
2143 }
2144
Michael Chan7947b202007-06-04 21:17:10 -07002145 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2146 vcid_addr += (i << PHY_CTX_SHIFT);
2147 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002148
Michael Chan5d5d0012007-12-12 11:17:43 -08002149 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
Michael Chan7947b202007-06-04 21:17:10 -07002150 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2151
2152 /* Zero out the context. */
2153 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
Michael Chan5d5d0012007-12-12 11:17:43 -08002154 CTX_WR(bp, vcid_addr, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002155 }
Michael Chanb6016b72005-05-26 13:03:09 -07002156 }
2157}
2158
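/* Work around bad blocks in the on-chip RX buffer memory: enable the RX
 * mbuf allocator, drain the free pool, and remember only the buffers whose
 * address does not have bit 9 set (bit 9 marks a bad block).  The good
 * buffers are then returned to the pool, leaving the bad ones permanently
 * allocated so the hardware never hands them out.
 */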
2159static int
2160bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2161{
2162 u16 *good_mbuf;
2163 u32 good_mbuf_cnt;
2164 u32 val;
2165
2166 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2167 if (good_mbuf == NULL) {
2168 printk(KERN_ERR PFX "Failed to allocate memory in "
2169 "bnx2_alloc_bad_rbuf\n");
2170 return -ENOMEM;
2171 }
2172
2173 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2174 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2175
2176 good_mbuf_cnt = 0;
2177
2178 /* Allocate a bunch of mbufs and save the good ones in an array. */
2179 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2180 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2181 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2182
2183 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2184
2185 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2186
2187 /* The addresses with Bit 9 set are bad memory blocks. */
2188 if (!(val & (1 << 9))) {
2189 good_mbuf[good_mbuf_cnt] = (u16) val;
2190 good_mbuf_cnt++;
2191 }
2192
2193 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2194 }
2195
2196 	/* Free the good ones back to the mbuf pool, thus discarding
2197 	 * all the bad ones. */
2198 while (good_mbuf_cnt) {
2199 good_mbuf_cnt--;
2200
2201 val = good_mbuf[good_mbuf_cnt];
2202 val = (val << 9) | val | 1;
2203
2204 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2205 }
2206 kfree(good_mbuf);
2207 return 0;
2208}
2209
2210static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002211bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002212{
2213 u32 val;
2214 u8 *mac_addr = bp->dev->dev_addr;
2215
2216 val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002220 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002221 (mac_addr[4] << 8) | mac_addr[5];
2222
2223 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224}
2225
2226static inline int
Michael Chan47bf4242007-12-12 11:19:12 -08002227bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2228{
2229 dma_addr_t mapping;
2230 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231 struct rx_bd *rxbd =
2232 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233 struct page *page = alloc_page(GFP_ATOMIC);
2234
2235 if (!page)
2236 return -ENOMEM;
2237 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238 PCI_DMA_FROMDEVICE);
2239 rx_pg->page = page;
2240 pci_unmap_addr_set(rx_pg, mapping, mapping);
2241 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243 return 0;
2244}
2245
2246static void
2247bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248{
2249 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250 struct page *page = rx_pg->page;
2251
2252 if (!page)
2253 return;
2254
2255 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256 PCI_DMA_FROMDEVICE);
2257
2258 __free_page(page);
2259 rx_pg->page = NULL;
2260}
2261
2262static inline int
Michael Chanb6016b72005-05-26 13:03:09 -07002263bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2264{
2265 struct sk_buff *skb;
2266 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2267 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002268 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002269 unsigned long align;
2270
Michael Chan932f3772006-08-15 01:39:36 -07002271 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002272 if (skb == NULL) {
2273 return -ENOMEM;
2274 }
2275
Michael Chan59b47d82006-11-19 14:10:45 -08002276 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2277 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002278
Michael Chanb6016b72005-05-26 13:03:09 -07002279 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2280 PCI_DMA_FROMDEVICE);
2281
2282 rx_buf->skb = skb;
2283 pci_unmap_addr_set(rx_buf, mapping, mapping);
2284
2285 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2286 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2287
2288 bp->rx_prod_bseq += bp->rx_buf_use_size;
2289
2290 return 0;
2291}
2292
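/* An attention event is pending when the event bit in status_attn_bits
 * differs from the corresponding bit in status_attn_bits_ack.  When that
 * happens, mirror the new state into the ack bits through the SET/CLEAR
 * command registers so the same event is not reported again, and tell the
 * caller the event fired.
 */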
Michael Chanda3e4fb2007-05-03 13:24:23 -07002293static int
2294bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2295{
2296 struct status_block *sblk = bp->status_blk;
2297 u32 new_link_state, old_link_state;
2298 int is_set = 1;
2299
2300 new_link_state = sblk->status_attn_bits & event;
2301 old_link_state = sblk->status_attn_bits_ack & event;
2302 if (new_link_state != old_link_state) {
2303 if (new_link_state)
2304 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305 else
2306 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307 } else
2308 is_set = 0;
2309
2310 return is_set;
2311}
2312
Michael Chanb6016b72005-05-26 13:03:09 -07002313static void
2314bnx2_phy_int(struct bnx2 *bp)
2315{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002316 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2317 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002318 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002319 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002320 }
Michael Chan0d8a6572007-07-07 22:49:43 -07002321 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2322 bnx2_set_remote_link(bp);
2323
Michael Chanb6016b72005-05-26 13:03:09 -07002324}
2325
2326static void
2327bnx2_tx_int(struct bnx2 *bp)
2328{
Michael Chanf4e418f2005-11-04 08:53:48 -08002329 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002330 u16 hw_cons, sw_cons, sw_ring_cons;
2331 int tx_free_bd = 0;
2332
Michael Chanf4e418f2005-11-04 08:53:48 -08002333 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002334 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2335 hw_cons++;
2336 }
2337 sw_cons = bp->tx_cons;
2338
2339 while (sw_cons != hw_cons) {
2340 struct sw_bd *tx_buf;
2341 struct sk_buff *skb;
2342 int i, last;
2343
2344 sw_ring_cons = TX_RING_IDX(sw_cons);
2345
2346 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2347 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002348
Michael Chanb6016b72005-05-26 13:03:09 -07002349 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002350 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002351 u16 last_idx, last_ring_idx;
2352
2353 last_idx = sw_cons +
2354 skb_shinfo(skb)->nr_frags + 1;
2355 last_ring_idx = sw_ring_cons +
2356 skb_shinfo(skb)->nr_frags + 1;
2357 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2358 last_idx++;
2359 }
2360 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2361 break;
2362 }
2363 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002364
Michael Chanb6016b72005-05-26 13:03:09 -07002365 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2366 skb_headlen(skb), PCI_DMA_TODEVICE);
2367
2368 tx_buf->skb = NULL;
2369 last = skb_shinfo(skb)->nr_frags;
2370
2371 for (i = 0; i < last; i++) {
2372 sw_cons = NEXT_TX_BD(sw_cons);
2373
2374 pci_unmap_page(bp->pdev,
2375 pci_unmap_addr(
2376 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2377 mapping),
2378 skb_shinfo(skb)->frags[i].size,
2379 PCI_DMA_TODEVICE);
2380 }
2381
2382 sw_cons = NEXT_TX_BD(sw_cons);
2383
2384 tx_free_bd += last + 1;
2385
Michael Chan745720e2006-06-29 12:37:41 -07002386 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002387
Michael Chanf4e418f2005-11-04 08:53:48 -08002388 hw_cons = bp->hw_tx_cons =
2389 sblk->status_tx_quick_consumer_index0;
2390
Michael Chanb6016b72005-05-26 13:03:09 -07002391 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2392 hw_cons++;
2393 }
2394 }
2395
Michael Chane89bbf12005-08-25 15:36:58 -07002396 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002397 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2398 * before checking for netif_queue_stopped(). Without the
2399 * memory barrier, there is a small possibility that bnx2_start_xmit()
2400 * will miss it and cause the queue to be stopped forever.
2401 */
2402 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002403
Michael Chan2f8af122006-08-15 01:39:10 -07002404 if (unlikely(netif_queue_stopped(bp->dev)) &&
2405 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2406 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002407 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002408 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002409 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002410 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002411 }
Michael Chanb6016b72005-05-26 13:03:09 -07002412}
2413
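/* Recycle page-ring entries (descriptive note).  Starting at the current
 * page consumer, move `count' pages back to the producer side; when an skb
 * is passed in, its last fragment's page is detached, remapped for DMA and
 * placed back into the ring before the skb is freed.
 */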
Michael Chan1db82f22007-12-12 11:19:35 -08002414static void
2415bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
2416{
2417 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2418 struct rx_bd *cons_bd, *prod_bd;
2419 dma_addr_t mapping;
2420 int i;
2421 u16 hw_prod = bp->rx_pg_prod, prod;
2422 u16 cons = bp->rx_pg_cons;
2423
2424 for (i = 0; i < count; i++) {
2425 prod = RX_PG_RING_IDX(hw_prod);
2426
2427 prod_rx_pg = &bp->rx_pg_ring[prod];
2428 cons_rx_pg = &bp->rx_pg_ring[cons];
2429 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2430 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2431
2432 if (i == 0 && skb) {
2433 struct page *page;
2434 struct skb_shared_info *shinfo;
2435
2436 shinfo = skb_shinfo(skb);
2437 shinfo->nr_frags--;
2438 page = shinfo->frags[shinfo->nr_frags].page;
2439 shinfo->frags[shinfo->nr_frags].page = NULL;
2440 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2441 PCI_DMA_FROMDEVICE);
2442 cons_rx_pg->page = page;
2443 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2444 dev_kfree_skb(skb);
2445 }
2446 if (prod != cons) {
2447 prod_rx_pg->page = cons_rx_pg->page;
2448 cons_rx_pg->page = NULL;
2449 pci_unmap_addr_set(prod_rx_pg, mapping,
2450 pci_unmap_addr(cons_rx_pg, mapping));
2451
2452 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2453 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2454
2455 }
2456 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2457 hw_prod = NEXT_RX_BD(hw_prod);
2458 }
2459 bp->rx_pg_prod = hw_prod;
2460 bp->rx_pg_cons = cons;
2461}
2462
Michael Chanb6016b72005-05-26 13:03:09 -07002463static inline void
2464bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2465 u16 cons, u16 prod)
2466{
Michael Chan236b6392006-03-20 17:49:02 -08002467 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2468 struct rx_bd *cons_bd, *prod_bd;
2469
2470 cons_rx_buf = &bp->rx_buf_ring[cons];
2471 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002472
2473 pci_dma_sync_single_for_device(bp->pdev,
2474 pci_unmap_addr(cons_rx_buf, mapping),
2475 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2476
Michael Chan236b6392006-03-20 17:49:02 -08002477 bp->rx_prod_bseq += bp->rx_buf_use_size;
2478
2479 prod_rx_buf->skb = skb;
2480
2481 if (cons == prod)
2482 return;
2483
Michael Chanb6016b72005-05-26 13:03:09 -07002484 pci_unmap_addr_set(prod_rx_buf, mapping,
2485 pci_unmap_addr(cons_rx_buf, mapping));
2486
Michael Chan3fdfcc22006-03-20 17:49:49 -08002487 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2488 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002489 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2490 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002491}
2492
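/* Finish building a received skb.  The rx buffer is replenished first (on
 * allocation failure the old skb is recycled and any page-ring pages are
 * returned).  For normal frames the whole packet lives in the linear
 * buffer; for split or jumbo frames only hdr_len bytes stay linear and the
 * remainder is attached from the page ring as skb fragments.  The 4-byte
 * adjustments account for the trailing frame CRC included in
 * l2_fhdr_pkt_len (len was already trimmed by 4 in bnx2_rx_int()).
 */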
Michael Chan85833c62007-12-12 11:17:01 -08002493static int
2494bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
Michael Chan1db82f22007-12-12 11:19:35 -08002495 unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
Michael Chan85833c62007-12-12 11:17:01 -08002496{
2497 int err;
2498 u16 prod = ring_idx & 0xffff;
2499
2500 err = bnx2_alloc_rx_skb(bp, prod);
2501 if (unlikely(err)) {
2502 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
Michael Chan1db82f22007-12-12 11:19:35 -08002503 if (hdr_len) {
2504 unsigned int raw_len = len + 4;
2505 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2506
2507 bnx2_reuse_rx_skb_pages(bp, NULL, pages);
2508 }
Michael Chan85833c62007-12-12 11:17:01 -08002509 return err;
2510 }
2511
2512 skb_reserve(skb, bp->rx_offset);
2513 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2514 PCI_DMA_FROMDEVICE);
2515
Michael Chan1db82f22007-12-12 11:19:35 -08002516 if (hdr_len == 0) {
2517 skb_put(skb, len);
2518 return 0;
2519 } else {
2520 unsigned int i, frag_len, frag_size, pages;
2521 struct sw_pg *rx_pg;
2522 u16 pg_cons = bp->rx_pg_cons;
2523 u16 pg_prod = bp->rx_pg_prod;
2524
2525 frag_size = len + 4 - hdr_len;
2526 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2527 skb_put(skb, hdr_len);
2528
2529 for (i = 0; i < pages; i++) {
2530 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2531 if (unlikely(frag_len <= 4)) {
2532 unsigned int tail = 4 - frag_len;
2533
2534 bp->rx_pg_cons = pg_cons;
2535 bp->rx_pg_prod = pg_prod;
2536 bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
2537 skb->len -= tail;
2538 if (i == 0) {
2539 skb->tail -= tail;
2540 } else {
2541 skb_frag_t *frag =
2542 &skb_shinfo(skb)->frags[i - 1];
2543 frag->size -= tail;
2544 skb->data_len -= tail;
2545 skb->truesize -= tail;
2546 }
2547 return 0;
2548 }
2549 rx_pg = &bp->rx_pg_ring[pg_cons];
2550
2551 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2552 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2553
2554 if (i == pages - 1)
2555 frag_len -= 4;
2556
2557 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2558 rx_pg->page = NULL;
2559
2560 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2561 if (unlikely(err)) {
2562 bp->rx_pg_cons = pg_cons;
2563 bp->rx_pg_prod = pg_prod;
2564 bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
2565 return err;
2566 }
2567
2568 frag_size -= frag_len;
2569 skb->data_len += frag_len;
2570 skb->truesize += frag_len;
2571 skb->len += frag_len;
2572
2573 pg_prod = NEXT_RX_BD(pg_prod);
2574 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2575 }
2576 bp->rx_pg_prod = pg_prod;
2577 bp->rx_pg_cons = pg_cons;
2578 }
Michael Chan85833c62007-12-12 11:17:01 -08002579 return 0;
2580}
2581
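/* Read the rx consumer index from the status block.  Indices that land on
 * the last entry of a ring page (MAX_RX_DESC_CNT) are skipped; that slot is
 * reserved (it links to the next ring page) rather than describing a
 * packet.  The tx path in bnx2_tx_int() above uses the same convention.
 */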
Michael Chanc09c2622007-12-10 17:18:37 -08002582static inline u16
2583bnx2_get_hw_rx_cons(struct bnx2 *bp)
2584{
2585 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2586
2587 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2588 cons++;
2589 return cons;
2590}
2591
Michael Chanb6016b72005-05-26 13:03:09 -07002592static int
2593bnx2_rx_int(struct bnx2 *bp, int budget)
2594{
2595 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2596 struct l2_fhdr *rx_hdr;
Michael Chan1db82f22007-12-12 11:19:35 -08002597 int rx_pkt = 0, pg_ring_used = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002598
Michael Chanc09c2622007-12-10 17:18:37 -08002599 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002600 sw_cons = bp->rx_cons;
2601 sw_prod = bp->rx_prod;
2602
2603 /* Memory barrier necessary as speculative reads of the rx
2604 * buffer can be ahead of the index in the status block
2605 */
2606 rmb();
2607 while (sw_cons != hw_cons) {
Michael Chan1db82f22007-12-12 11:19:35 -08002608 unsigned int len, hdr_len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002609 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002610 struct sw_bd *rx_buf;
2611 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002612 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002613
2614 sw_ring_cons = RX_RING_IDX(sw_cons);
2615 sw_ring_prod = RX_RING_IDX(sw_prod);
2616
2617 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2618 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002619
2620 rx_buf->skb = NULL;
2621
2622 dma_addr = pci_unmap_addr(rx_buf, mapping);
2623
2624 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002625 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2626
2627 rx_hdr = (struct l2_fhdr *) skb->data;
Michael Chan1db82f22007-12-12 11:19:35 -08002628 len = rx_hdr->l2_fhdr_pkt_len;
Michael Chanb6016b72005-05-26 13:03:09 -07002629
Michael Chanade2bfe2006-01-23 16:09:51 -08002630 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002631 (L2_FHDR_ERRORS_BAD_CRC |
2632 L2_FHDR_ERRORS_PHY_DECODE |
2633 L2_FHDR_ERRORS_ALIGNMENT |
2634 L2_FHDR_ERRORS_TOO_SHORT |
2635 L2_FHDR_ERRORS_GIANT_FRAME)) {
2636
Michael Chan85833c62007-12-12 11:17:01 -08002637 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2638 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002639 }
Michael Chan1db82f22007-12-12 11:19:35 -08002640 hdr_len = 0;
2641 if (status & L2_FHDR_STATUS_SPLIT) {
2642 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2643 pg_ring_used = 1;
2644 } else if (len > bp->rx_jumbo_thresh) {
2645 hdr_len = bp->rx_jumbo_thresh;
2646 pg_ring_used = 1;
2647 }
2648
2649 len -= 4;
Michael Chanb6016b72005-05-26 13:03:09 -07002650
Michael Chan5d5d0012007-12-12 11:17:43 -08002651 if (len <= bp->rx_copy_thresh) {
Michael Chanb6016b72005-05-26 13:03:09 -07002652 struct sk_buff *new_skb;
2653
Michael Chan932f3772006-08-15 01:39:36 -07002654 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chan85833c62007-12-12 11:17:01 -08002655 if (new_skb == NULL) {
2656 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2657 sw_ring_prod);
2658 goto next_rx;
2659 }
Michael Chanb6016b72005-05-26 13:03:09 -07002660
2661 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002662 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2663 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002664 skb_reserve(new_skb, 2);
2665 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002666
2667 bnx2_reuse_rx_skb(bp, skb,
2668 sw_ring_cons, sw_ring_prod);
2669
2670 skb = new_skb;
Michael Chan1db82f22007-12-12 11:19:35 -08002671 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
Michael Chan85833c62007-12-12 11:17:01 -08002672 (sw_ring_cons << 16) | sw_ring_prod)))
Michael Chanb6016b72005-05-26 13:03:09 -07002673 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002674
2675 skb->protocol = eth_type_trans(skb, bp->dev);
2676
2677 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002678 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002679
Michael Chan745720e2006-06-29 12:37:41 -07002680 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002681 goto next_rx;
2682
2683 }
2684
Michael Chanb6016b72005-05-26 13:03:09 -07002685 skb->ip_summed = CHECKSUM_NONE;
2686 if (bp->rx_csum &&
2687 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2688 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2689
Michael Chanade2bfe2006-01-23 16:09:51 -08002690 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2691 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002692 skb->ip_summed = CHECKSUM_UNNECESSARY;
2693 }
2694
2695#ifdef BCM_VLAN
2696 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2697 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2698 rx_hdr->l2_fhdr_vlan_tag);
2699 }
2700 else
2701#endif
2702 netif_receive_skb(skb);
2703
2704 bp->dev->last_rx = jiffies;
2705 rx_pkt++;
2706
2707next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002708 sw_cons = NEXT_RX_BD(sw_cons);
2709 sw_prod = NEXT_RX_BD(sw_prod);
2710
2711 if ((rx_pkt == budget))
2712 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002713
2714 /* Refresh hw_cons to see if there is new work */
2715 if (sw_cons == hw_cons) {
Michael Chanc09c2622007-12-10 17:18:37 -08002716 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanf4e418f2005-11-04 08:53:48 -08002717 rmb();
2718 }
Michael Chanb6016b72005-05-26 13:03:09 -07002719 }
2720 bp->rx_cons = sw_cons;
2721 bp->rx_prod = sw_prod;
2722
Michael Chan1db82f22007-12-12 11:19:35 -08002723 if (pg_ring_used)
2724 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2725 bp->rx_pg_prod);
2726
Michael Chanb6016b72005-05-26 13:03:09 -07002727 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2728
2729 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2730
2731 mmiowb();
2732
2733 return rx_pkt;
2734
2735}
2736
2737/* MSI ISR - The only difference between this and the INTx ISR
2738 * is that the MSI interrupt is always serviced.
2739 */
2740static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002741bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002742{
2743 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002744 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002745
Michael Chanc921e4c2005-09-08 13:15:32 -07002746 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002747 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2748 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2749 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2750
2751 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002752 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2753 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002754
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002755 netif_rx_schedule(dev, &bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07002756
Michael Chan73eef4c2005-08-25 15:39:15 -07002757 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002758}
2759
2760static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002761bnx2_msi_1shot(int irq, void *dev_instance)
2762{
2763 struct net_device *dev = dev_instance;
2764 struct bnx2 *bp = netdev_priv(dev);
2765
2766 prefetch(bp->status_blk);
2767
2768 /* Return here if interrupt is disabled. */
2769 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2770 return IRQ_HANDLED;
2771
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002772 netif_rx_schedule(dev, &bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07002773
2774 return IRQ_HANDLED;
2775}
2776
2777static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002778bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002779{
2780 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002781 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002782 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002783
2784 /* When using INTx, it is possible for the interrupt to arrive
2785 	 * at the CPU before the status block write posted prior to the
2786 * interrupt. Reading a register will flush the status block.
2787 * When using MSI, the MSI message will always complete after
2788 * the status block write.
2789 */
Michael Chanb8a7ce72007-07-07 22:51:03 -07002790 if ((sblk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002791 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2792 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002793 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002794
2795 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2796 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2797 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2798
Michael Chanb8a7ce72007-07-07 22:51:03 -07002799 /* Read back to deassert IRQ immediately to avoid too many
2800 * spurious interrupts.
2801 */
2802 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2803
Michael Chanb6016b72005-05-26 13:03:09 -07002804 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002805 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2806 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002807
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002808 if (netif_rx_schedule_prep(dev, &bp->napi)) {
Michael Chanb8a7ce72007-07-07 22:51:03 -07002809 bp->last_status_idx = sblk->status_idx;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002810 __netif_rx_schedule(dev, &bp->napi);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002811 }
Michael Chanb6016b72005-05-26 13:03:09 -07002812
Michael Chan73eef4c2005-08-25 15:39:15 -07002813 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002814}
2815
Michael Chan0d8a6572007-07-07 22:49:43 -07002816#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2817 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002818
Michael Chanf4e418f2005-11-04 08:53:48 -08002819static inline int
2820bnx2_has_work(struct bnx2 *bp)
2821{
2822 struct status_block *sblk = bp->status_blk;
2823
Michael Chanc09c2622007-12-10 17:18:37 -08002824 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
Michael Chanf4e418f2005-11-04 08:53:48 -08002825 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2826 return 1;
2827
Michael Chanda3e4fb2007-05-03 13:24:23 -07002828 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2829 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002830 return 1;
2831
2832 return 0;
2833}
2834
David S. Miller6f535762007-10-11 18:08:29 -07002835static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
Michael Chanb6016b72005-05-26 13:03:09 -07002836{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002837 struct status_block *sblk = bp->status_blk;
2838 u32 status_attn_bits = sblk->status_attn_bits;
2839 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002840
Michael Chanda3e4fb2007-05-03 13:24:23 -07002841 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2842 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002843
Michael Chanb6016b72005-05-26 13:03:09 -07002844 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002845
2846 /* This is needed to take care of transient status
2847 * during link changes.
2848 */
2849 REG_WR(bp, BNX2_HC_COMMAND,
2850 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2851 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002852 }
2853
Michael Chan6dee6422007-10-12 01:40:38 -07002854 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002855 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002856
Michael Chanc09c2622007-12-10 17:18:37 -08002857 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
David S. Miller6f535762007-10-11 18:08:29 -07002858 work_done += bnx2_rx_int(bp, budget - work_done);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002859
David S. Miller6f535762007-10-11 18:08:29 -07002860 return work_done;
2861}
Michael Chanf4e418f2005-11-04 08:53:48 -08002862
David S. Miller6f535762007-10-11 18:08:29 -07002863static int bnx2_poll(struct napi_struct *napi, int budget)
2864{
2865 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2866 int work_done = 0;
Michael Chan6dee6422007-10-12 01:40:38 -07002867 struct status_block *sblk = bp->status_blk;
David S. Miller6f535762007-10-11 18:08:29 -07002868
2869 while (1) {
2870 work_done = bnx2_poll_work(bp, work_done, budget);
2871
2872 if (unlikely(work_done >= budget))
2873 break;
2874
Michael Chan6dee6422007-10-12 01:40:38 -07002875 /* bp->last_status_idx is used below to tell the hw how
2876 * much work has been processed, so we must read it before
2877 * checking for more work.
2878 */
2879 bp->last_status_idx = sblk->status_idx;
2880 rmb();
David S. Miller6f535762007-10-11 18:08:29 -07002881 if (likely(!bnx2_has_work(bp))) {
David S. Miller6f535762007-10-11 18:08:29 -07002882 netif_rx_complete(bp->dev, napi);
2883 if (likely(bp->flags & USING_MSI_FLAG)) {
2884 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2885 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2886 bp->last_status_idx);
Michael Chan6dee6422007-10-12 01:40:38 -07002887 break;
David S. Miller6f535762007-10-11 18:08:29 -07002888 }
2889 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2890 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2891 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2892 bp->last_status_idx);
2893
Michael Chan1269a8a2006-01-23 16:11:03 -08002894 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2895 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2896 bp->last_status_idx);
David S. Miller6f535762007-10-11 18:08:29 -07002897 break;
Michael Chan1269a8a2006-01-23 16:11:03 -08002898 }
Michael Chanb6016b72005-05-26 13:03:09 -07002899 }
2900
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002901 return work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002902}
2903
Herbert Xu932ff272006-06-09 12:20:56 -07002904/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002905 * from set_multicast.
2906 */
2907static void
2908bnx2_set_rx_mode(struct net_device *dev)
2909{
Michael Chan972ec0d2006-01-23 16:12:43 -08002910 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002911 u32 rx_mode, sort_mode;
2912 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002913
Michael Chanc770a652005-08-25 15:38:39 -07002914 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002915
2916 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2917 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2918 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2919#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002920 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002921 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002922#else
Michael Chane29054f2006-01-23 16:06:06 -08002923 if (!(bp->flags & ASF_ENABLE_FLAG))
2924 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002925#endif
2926 if (dev->flags & IFF_PROMISC) {
2927 /* Promiscuous mode. */
2928 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002929 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2930 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002931 }
2932 else if (dev->flags & IFF_ALLMULTI) {
2933 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2934 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2935 0xffffffff);
2936 }
2937 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2938 }
2939 else {
2940 /* Accept one or more multicast(s). */
2941 struct dev_mc_list *mclist;
2942 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2943 u32 regidx;
2944 u32 bit;
2945 u32 crc;
2946
2947 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2948
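		/* Hash each multicast address with ether_crc_le(); the low
		 * byte of the CRC selects one bit in a table of
		 * NUM_MC_HASH_REGISTERS 32-bit registers: CRC bits 7:5 pick
		 * the register, bits 4:0 pick the bit within it.
		 */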
2949 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2950 i++, mclist = mclist->next) {
2951
2952 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2953 bit = crc & 0xff;
2954 regidx = (bit & 0xe0) >> 5;
2955 bit &= 0x1f;
2956 mc_filter[regidx] |= (1 << bit);
2957 }
2958
2959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2960 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2961 mc_filter[i]);
2962 }
2963
2964 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2965 }
2966
2967 if (rx_mode != bp->rx_mode) {
2968 bp->rx_mode = rx_mode;
2969 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2970 }
2971
2972 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2973 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2974 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2975
Michael Chanc770a652005-08-25 15:38:39 -07002976 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002977}
2978
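/* Load RV2P microcode: each 8-byte instruction is written through the
 * INSTR_HIGH/INSTR_LOW registers, followed by an address/command write to
 * the selected processor (PROC1 or PROC2).  The processor is then held in
 * reset; the un-stall happens later.
 */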
2979static void
2980load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2981 u32 rv2p_proc)
2982{
2983 int i;
2984 u32 val;
2985
2986
2987 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002988 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002989 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002990 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002991 rv2p_code++;
2992
2993 if (rv2p_proc == RV2P_PROC1) {
2994 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2995 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2996 }
2997 else {
2998 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2999 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3000 }
3001 }
3002
3003 	/* Reset the processor; un-stall is done later. */
3004 if (rv2p_proc == RV2P_PROC1) {
3005 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3006 }
3007 else {
3008 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3009 }
3010}
3011
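/* Load firmware into one of the on-chip RISC processors.  The CPU is
 * halted, the text section (inflated from a zlib blob when fw->gz_text is
 * set) plus the data and rodata sections are copied into its scratchpad
 * through indirect register writes, sbss/bss are zero-filled, then the
 * program counter is set to fw->start_addr and the halt bit is cleared to
 * start the CPU.
 */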
Michael Chanaf3ee512006-11-19 14:09:25 -08003012static int
Michael Chanb6016b72005-05-26 13:03:09 -07003013load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3014{
3015 u32 offset;
3016 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08003017 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003018
3019 /* Halt the CPU. */
3020 val = REG_RD_IND(bp, cpu_reg->mode);
3021 val |= cpu_reg->mode_value_halt;
3022 REG_WR_IND(bp, cpu_reg->mode, val);
3023 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3024
3025 /* Load the Text area. */
3026 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08003027 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07003028 int j;
3029
Michael Chanea1f8d52007-10-02 16:27:35 -07003030 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3031 fw->gz_text_len);
3032 if (rc < 0)
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003033 return rc;
Michael Chanea1f8d52007-10-02 16:27:35 -07003034
Michael Chanb6016b72005-05-26 13:03:09 -07003035 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003036 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07003037 }
3038 }
3039
3040 /* Load the Data area. */
3041 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3042 if (fw->data) {
3043 int j;
3044
3045 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3046 REG_WR_IND(bp, offset, fw->data[j]);
3047 }
3048 }
3049
3050 /* Load the SBSS area. */
3051 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003052 if (fw->sbss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003053 int j;
3054
3055 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003056 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003057 }
3058 }
3059
3060 /* Load the BSS area. */
3061 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003062 if (fw->bss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003063 int j;
3064
3065 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003066 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003067 }
3068 }
3069
3070 /* Load the Read-Only area. */
3071 offset = cpu_reg->spad_base +
3072 (fw->rodata_addr - cpu_reg->mips_view_base);
3073 if (fw->rodata) {
3074 int j;
3075
3076 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3077 REG_WR_IND(bp, offset, fw->rodata[j]);
3078 }
3079 }
3080
3081 /* Clear the pre-fetch instruction. */
3082 REG_WR_IND(bp, cpu_reg->inst, 0);
3083 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3084
3085 /* Start the CPU. */
3086 val = REG_RD_IND(bp, cpu_reg->mode);
3087 val &= ~cpu_reg->mode_value_halt;
3088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3089 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08003090
3091 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003092}
3093
Michael Chanfba9fe92006-06-12 22:21:25 -07003094static int
Michael Chanb6016b72005-05-26 13:03:09 -07003095bnx2_init_cpus(struct bnx2 *bp)
3096{
3097 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08003098 struct fw_info *fw;
Michael Chan110d0ef2007-12-12 11:18:34 -08003099 int rc, rv2p_len;
3100 void *text, *rv2p;
Michael Chanb6016b72005-05-26 13:03:09 -07003101
3102 /* Initialize the RV2P processor. */
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003103 text = vmalloc(FW_BUF_SIZE);
3104 if (!text)
3105 return -ENOMEM;
Michael Chan110d0ef2007-12-12 11:18:34 -08003106 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3107 rv2p = bnx2_xi_rv2p_proc1;
3108 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3109 } else {
3110 rv2p = bnx2_rv2p_proc1;
3111 rv2p_len = sizeof(bnx2_rv2p_proc1);
3112 }
3113 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003114 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003115 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003116
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003117 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
Michael Chanfba9fe92006-06-12 22:21:25 -07003118
Michael Chan110d0ef2007-12-12 11:18:34 -08003119 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3120 rv2p = bnx2_xi_rv2p_proc2;
3121 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3122 } else {
3123 rv2p = bnx2_rv2p_proc2;
3124 rv2p_len = sizeof(bnx2_rv2p_proc2);
3125 }
3126 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003127 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003128 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003129
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003130 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07003131
3132 /* Initialize the RX Processor. */
3133 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3134 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3135 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3136 cpu_reg.state = BNX2_RXP_CPU_STATE;
3137 cpu_reg.state_value_clear = 0xffffff;
3138 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3139 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3140 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3141 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3142 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3143 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3144 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003145
Michael Chand43584c2006-11-19 14:14:35 -08003146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3147 fw = &bnx2_rxp_fw_09;
3148 else
3149 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003150
Michael Chanea1f8d52007-10-02 16:27:35 -07003151 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003152 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003153 if (rc)
3154 goto init_cpu_err;
3155
Michael Chanb6016b72005-05-26 13:03:09 -07003156 /* Initialize the TX Processor. */
3157 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3158 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3159 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3160 cpu_reg.state = BNX2_TXP_CPU_STATE;
3161 cpu_reg.state_value_clear = 0xffffff;
3162 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3163 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3164 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3165 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3166 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3167 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3168 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003169
Michael Chand43584c2006-11-19 14:14:35 -08003170 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3171 fw = &bnx2_txp_fw_09;
3172 else
3173 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003174
Michael Chanea1f8d52007-10-02 16:27:35 -07003175 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003176 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003177 if (rc)
3178 goto init_cpu_err;
3179
Michael Chanb6016b72005-05-26 13:03:09 -07003180 /* Initialize the TX Patch-up Processor. */
3181 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3182 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3183 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3184 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3185 cpu_reg.state_value_clear = 0xffffff;
3186 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3187 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3188 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3189 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3190 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3191 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3192 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003193
Michael Chand43584c2006-11-19 14:14:35 -08003194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3195 fw = &bnx2_tpat_fw_09;
3196 else
3197 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003198
Michael Chanea1f8d52007-10-02 16:27:35 -07003199 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003200 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003201 if (rc)
3202 goto init_cpu_err;
3203
Michael Chanb6016b72005-05-26 13:03:09 -07003204 /* Initialize the Completion Processor. */
3205 cpu_reg.mode = BNX2_COM_CPU_MODE;
3206 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3207 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3208 cpu_reg.state = BNX2_COM_CPU_STATE;
3209 cpu_reg.state_value_clear = 0xffffff;
3210 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3211 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3212 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3213 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3214 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3215 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3216 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003217
Michael Chand43584c2006-11-19 14:14:35 -08003218 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3219 fw = &bnx2_com_fw_09;
3220 else
3221 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003222
Michael Chanea1f8d52007-10-02 16:27:35 -07003223 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003224 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003225 if (rc)
3226 goto init_cpu_err;
3227
Michael Chand43584c2006-11-19 14:14:35 -08003228 /* Initialize the Command Processor. */
3229 cpu_reg.mode = BNX2_CP_CPU_MODE;
3230 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3231 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3232 cpu_reg.state = BNX2_CP_CPU_STATE;
3233 cpu_reg.state_value_clear = 0xffffff;
3234 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3235 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3236 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3237 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3238 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3239 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3240 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003241
Michael Chan110d0ef2007-12-12 11:18:34 -08003242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chand43584c2006-11-19 14:14:35 -08003243 fw = &bnx2_cp_fw_09;
Michael Chan110d0ef2007-12-12 11:18:34 -08003244 else
3245 fw = &bnx2_cp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003246
Michael Chan110d0ef2007-12-12 11:18:34 -08003247 fw->text = text;
3248 rc = load_cpu_fw(bp, &cpu_reg, fw);
3249
Michael Chanfba9fe92006-06-12 22:21:25 -07003250init_cpu_err:
Michael Chanea1f8d52007-10-02 16:27:35 -07003251 vfree(text);
Michael Chanfba9fe92006-06-12 22:21:25 -07003252 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003253}
3254
3255static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003256bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003257{
3258 u16 pmcsr;
3259
3260 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3261
3262 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003263 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003264 u32 val;
3265
3266 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3267 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3268 PCI_PM_CTRL_PME_STATUS);
3269
3270 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3271 /* delay required during transition out of D3hot */
3272 msleep(20);
3273
3274 val = REG_RD(bp, BNX2_EMAC_MODE);
3275 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3276 val &= ~BNX2_EMAC_MODE_MPKT;
3277 REG_WR(bp, BNX2_EMAC_MODE, val);
3278
3279 val = REG_RD(bp, BNX2_RPM_CONFIG);
3280 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3281 REG_WR(bp, BNX2_RPM_CONFIG, val);
3282 break;
3283 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003284 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003285 int i;
3286 u32 val, wol_msg;
3287
3288 if (bp->wol) {
3289 u32 advertising;
3290 u8 autoneg;
3291
3292 autoneg = bp->autoneg;
3293 advertising = bp->advertising;
3294
Michael Chan239cd342007-10-17 19:26:15 -07003295 if (bp->phy_port == PORT_TP) {
3296 bp->autoneg = AUTONEG_SPEED;
3297 bp->advertising = ADVERTISED_10baseT_Half |
3298 ADVERTISED_10baseT_Full |
3299 ADVERTISED_100baseT_Half |
3300 ADVERTISED_100baseT_Full |
3301 ADVERTISED_Autoneg;
3302 }
Michael Chanb6016b72005-05-26 13:03:09 -07003303
Michael Chan239cd342007-10-17 19:26:15 -07003304 spin_lock_bh(&bp->phy_lock);
3305 bnx2_setup_phy(bp, bp->phy_port);
3306 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003307
3308 bp->autoneg = autoneg;
3309 bp->advertising = advertising;
3310
3311 bnx2_set_mac_addr(bp);
3312
3313 val = REG_RD(bp, BNX2_EMAC_MODE);
3314
3315 /* Enable port mode. */
3316 val &= ~BNX2_EMAC_MODE_PORT;
Michael Chan239cd342007-10-17 19:26:15 -07003317 val |= BNX2_EMAC_MODE_MPKT_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003318 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003319 BNX2_EMAC_MODE_MPKT;
Michael Chan239cd342007-10-17 19:26:15 -07003320 if (bp->phy_port == PORT_TP)
3321 val |= BNX2_EMAC_MODE_PORT_MII;
3322 else {
3323 val |= BNX2_EMAC_MODE_PORT_GMII;
3324 if (bp->line_speed == SPEED_2500)
3325 val |= BNX2_EMAC_MODE_25G_MODE;
3326 }
Michael Chanb6016b72005-05-26 13:03:09 -07003327
3328 REG_WR(bp, BNX2_EMAC_MODE, val);
3329
3330 /* receive all multicast */
3331 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3332 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3333 0xffffffff);
3334 }
3335 REG_WR(bp, BNX2_EMAC_RX_MODE,
3336 BNX2_EMAC_RX_MODE_SORT_MODE);
3337
3338 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3339 BNX2_RPM_SORT_USER0_MC_EN;
3340 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3341 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3342 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3343 BNX2_RPM_SORT_USER0_ENA);
3344
3345 /* Need to enable EMAC and RPM for WOL. */
3346 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3347 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3348 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3349 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3350
3351 val = REG_RD(bp, BNX2_RPM_CONFIG);
3352 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3353 REG_WR(bp, BNX2_RPM_CONFIG, val);
3354
3355 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3356 }
3357 else {
3358 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3359 }
3360
Michael Chandda1e392006-01-23 16:08:14 -08003361 if (!(bp->flags & NO_WOL_FLAG))
3362 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003363
3364 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3365 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3366 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3367
3368 if (bp->wol)
3369 pmcsr |= 3;
3370 }
3371 else {
3372 pmcsr |= 3;
3373 }
3374 if (bp->wol) {
3375 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3376 }
3377 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3378 pmcsr);
3379
3380 /* No more memory access after this point until
3381 * device is brought back to D0.
3382 */
3383 udelay(50);
3384 break;
3385 }
3386 default:
3387 return -EINVAL;
3388 }
3389 return 0;
3390}
3391
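/* Illustrative sketch (not part of the driver): a simplified view of how
 * the PMCSR word is composed in the PCI_D3hot branch of
 * bnx2_set_power_state() above.  The power-state field is cleared, D3hot
 * (value 3) is selected, and PME generation is enabled only when
 * Wake-on-LAN is requested.  The real code additionally skips setting the
 * state bits on 5706 A0/A1 when WOL is off; this helper and its 'wol'
 * flag are hypothetical.
 */
static u16 build_d3hot_pmcsr_example(u16 pmcsr, int wol)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear current power state */
	pmcsr |= 3;				/* select D3hot */
	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;	/* allow wake events */
	return pmcsr;
}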
3392static int
3393bnx2_acquire_nvram_lock(struct bnx2 *bp)
3394{
3395 u32 val;
3396 int j;
3397
3398 /* Request access to the flash interface. */
3399 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3400 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3401 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3402 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3403 break;
3404
3405 udelay(5);
3406 }
3407
3408 if (j >= NVRAM_TIMEOUT_COUNT)
3409 return -EBUSY;
3410
3411 return 0;
3412}
3413
3414static int
3415bnx2_release_nvram_lock(struct bnx2 *bp)
3416{
3417 int j;
3418 u32 val;
3419
3420 /* Relinquish nvram interface. */
3421 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3422
3423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3425 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3426 break;
3427
3428 udelay(5);
3429 }
3430
3431 if (j >= NVRAM_TIMEOUT_COUNT)
3432 return -EBUSY;
3433
3434 return 0;
3435}
3436
3437
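/* Illustrative sketch (not part of the driver): the bounded-polling idiom
 * used by the NVRAM helpers above and below -- test a completion condition
 * up to a fixed number of times with a small delay between attempts, and
 * report -EBUSY if it never becomes true.  The callback and function name
 * are hypothetical stand-ins for the register reads in the real loops.
 */
static int poll_until_done_example(int (*done_fn)(void *ctx), void *ctx,
				   int max_tries)
{
	int j;

	for (j = 0; j < max_tries; j++) {
		if (done_fn(ctx))
			return 0;	/* condition met */
		udelay(5);		/* same spacing as the driver loops */
	}
	return -EBUSY;			/* timed out */
}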
3438static int
3439bnx2_enable_nvram_write(struct bnx2 *bp)
3440{
3441 u32 val;
3442
3443 val = REG_RD(bp, BNX2_MISC_CFG);
3444 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3445
Michael Chane30372c2007-07-16 18:26:23 -07003446 if (bp->flash_info->flags & BNX2_NV_WREN) {
Michael Chanb6016b72005-05-26 13:03:09 -07003447 int j;
3448
3449 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3450 REG_WR(bp, BNX2_NVM_COMMAND,
3451 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3452
3453 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3454 udelay(5);
3455
3456 val = REG_RD(bp, BNX2_NVM_COMMAND);
3457 if (val & BNX2_NVM_COMMAND_DONE)
3458 break;
3459 }
3460
3461 if (j >= NVRAM_TIMEOUT_COUNT)
3462 return -EBUSY;
3463 }
3464 return 0;
3465}
3466
3467static void
3468bnx2_disable_nvram_write(struct bnx2 *bp)
3469{
3470 u32 val;
3471
3472 val = REG_RD(bp, BNX2_MISC_CFG);
3473 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3474}
3475
3476
3477static void
3478bnx2_enable_nvram_access(struct bnx2 *bp)
3479{
3480 u32 val;
3481
3482 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3483 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003484 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003485 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3486}
3487
3488static void
3489bnx2_disable_nvram_access(struct bnx2 *bp)
3490{
3491 u32 val;
3492
3493 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3494 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003495 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003496 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3497 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3498}
3499
3500static int
3501bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3502{
3503 u32 cmd;
3504 int j;
3505
Michael Chane30372c2007-07-16 18:26:23 -07003506 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
Michael Chanb6016b72005-05-26 13:03:09 -07003507 /* Buffered flash, no erase needed */
3508 return 0;
3509
3510 /* Build an erase command */
3511 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3512 BNX2_NVM_COMMAND_DOIT;
3513
3514 /* Need to clear DONE bit separately. */
3515 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3516
3517 /* Address of the NVRAM page to erase. */
3518 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3519
3520 /* Issue an erase command. */
3521 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3522
3523 /* Wait for completion. */
3524 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3525 u32 val;
3526
3527 udelay(5);
3528
3529 val = REG_RD(bp, BNX2_NVM_COMMAND);
3530 if (val & BNX2_NVM_COMMAND_DONE)
3531 break;
3532 }
3533
3534 if (j >= NVRAM_TIMEOUT_COUNT)
3535 return -EBUSY;
3536
3537 return 0;
3538}
3539
3540static int
3541bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3542{
3543 u32 cmd;
3544 int j;
3545
3546 /* Build the command word. */
3547 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3548
Michael Chane30372c2007-07-16 18:26:23 -07003549 /* Calculate the page-based offset for a buffered flash; not needed for 5709. */
3550 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003551 offset = ((offset / bp->flash_info->page_size) <<
3552 bp->flash_info->page_bits) +
3553 (offset % bp->flash_info->page_size);
3554 }
3555
3556 /* Need to clear DONE bit separately. */
3557 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3558
3559 /* Address of the NVRAM to read from. */
3560 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3561
3562 /* Issue a read command. */
3563 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3564
3565 /* Wait for completion. */
3566 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3567 u32 val;
3568
3569 udelay(5);
3570
3571 val = REG_RD(bp, BNX2_NVM_COMMAND);
3572 if (val & BNX2_NVM_COMMAND_DONE) {
3573 val = REG_RD(bp, BNX2_NVM_READ);
3574
3575 val = be32_to_cpu(val);
3576 memcpy(ret_val, &val, 4);
3577 break;
3578 }
3579 }
3580 if (j >= NVRAM_TIMEOUT_COUNT)
3581 return -EBUSY;
3582
3583 return 0;
3584}
3585
3586
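/* Illustrative sketch (not part of the driver): the address translation
 * that bnx2_nvram_read_dword() and bnx2_nvram_write_dword() apply for
 * BNX2_NV_TRANSLATE ("buffered") flash parts.  The page index is shifted
 * into the upper bits and the byte offset within the page stays in the
 * lower bits; page_size and page_bits would come from bp->flash_info.
 * The helper name is hypothetical.
 */
static u32 nvram_translate_offset_example(u32 offset, u32 page_size,
					  u32 page_bits)
{
	return ((offset / page_size) << page_bits) +
	       (offset % page_size);
}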
3587static int
3588bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3589{
3590 u32 cmd, val32;
3591 int j;
3592
3593 /* Build the command word. */
3594 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3595
Michael Chane30372c2007-07-16 18:26:23 -07003596 /* Calculate the page-based offset for a buffered flash; not needed for 5709. */
3597 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003598 offset = ((offset / bp->flash_info->page_size) <<
3599 bp->flash_info->page_bits) +
3600 (offset % bp->flash_info->page_size);
3601 }
3602
3603 /* Need to clear DONE bit separately. */
3604 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3605
3606 memcpy(&val32, val, 4);
3607 val32 = cpu_to_be32(val32);
3608
3609 /* Write the data. */
3610 REG_WR(bp, BNX2_NVM_WRITE, val32);
3611
3612 /* Address of the NVRAM to write to. */
3613 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3614
3615 /* Issue the write command. */
3616 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3617
3618 /* Wait for completion. */
3619 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3620 udelay(5);
3621
3622 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3623 break;
3624 }
3625 if (j >= NVRAM_TIMEOUT_COUNT)
3626 return -EBUSY;
3627
3628 return 0;
3629}
3630
3631static int
3632bnx2_init_nvram(struct bnx2 *bp)
3633{
3634 u32 val;
Michael Chane30372c2007-07-16 18:26:23 -07003635 int j, entry_count, rc = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003636 struct flash_spec *flash;
3637
Michael Chane30372c2007-07-16 18:26:23 -07003638 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3639 bp->flash_info = &flash_5709;
3640 goto get_flash_size;
3641 }
3642
Michael Chanb6016b72005-05-26 13:03:09 -07003643 /* Determine the selected interface. */
3644 val = REG_RD(bp, BNX2_NVM_CFG1);
3645
Denis Chengff8ac602007-09-02 18:30:18 +08003646 entry_count = ARRAY_SIZE(flash_table);
Michael Chanb6016b72005-05-26 13:03:09 -07003647
Michael Chanb6016b72005-05-26 13:03:09 -07003648 if (val & 0x40000000) {
3649
3650 /* Flash interface has been reconfigured */
3651 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003652 j++, flash++) {
3653 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3654 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003655 bp->flash_info = flash;
3656 break;
3657 }
3658 }
3659 }
3660 else {
Michael Chan37137702005-11-04 08:49:17 -08003661 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003662 /* Not yet been reconfigured */
3663
Michael Chan37137702005-11-04 08:49:17 -08003664 if (val & (1 << 23))
3665 mask = FLASH_BACKUP_STRAP_MASK;
3666 else
3667 mask = FLASH_STRAP_MASK;
3668
Michael Chanb6016b72005-05-26 13:03:09 -07003669 for (j = 0, flash = &flash_table[0]; j < entry_count;
3670 j++, flash++) {
3671
Michael Chan37137702005-11-04 08:49:17 -08003672 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003673 bp->flash_info = flash;
3674
3675 /* Request access to the flash interface. */
3676 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3677 return rc;
3678
3679 /* Enable access to flash interface */
3680 bnx2_enable_nvram_access(bp);
3681
3682 /* Reconfigure the flash interface */
3683 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3684 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3685 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3686 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3687
3688 /* Disable access to flash interface */
3689 bnx2_disable_nvram_access(bp);
3690 bnx2_release_nvram_lock(bp);
3691
3692 break;
3693 }
3694 }
3695 } /* if (val & 0x40000000) */
3696
3697 if (j == entry_count) {
3698 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003699 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003700 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003701 }
3702
Michael Chane30372c2007-07-16 18:26:23 -07003703get_flash_size:
Michael Chan1122db72006-01-23 16:11:42 -08003704 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3705 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3706 if (val)
3707 bp->flash_size = val;
3708 else
3709 bp->flash_size = bp->flash_info->total_size;
3710
Michael Chanb6016b72005-05-26 13:03:09 -07003711 return rc;
3712}
3713
3714static int
3715bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3716 int buf_size)
3717{
3718 int rc = 0;
3719 u32 cmd_flags, offset32, len32, extra;
3720
3721 if (buf_size == 0)
3722 return 0;
3723
3724 /* Request access to the flash interface. */
3725 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3726 return rc;
3727
3728 /* Enable access to flash interface */
3729 bnx2_enable_nvram_access(bp);
3730
3731 len32 = buf_size;
3732 offset32 = offset;
3733 extra = 0;
3734
3735 cmd_flags = 0;
3736
3737 if (offset32 & 3) {
3738 u8 buf[4];
3739 u32 pre_len;
3740
3741 offset32 &= ~3;
3742 pre_len = 4 - (offset & 3);
3743
3744 if (pre_len >= len32) {
3745 pre_len = len32;
3746 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3747 BNX2_NVM_COMMAND_LAST;
3748 }
3749 else {
3750 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3751 }
3752
3753 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3754
3755 if (rc)
3756 return rc;
3757
3758 memcpy(ret_buf, buf + (offset & 3), pre_len);
3759
3760 offset32 += 4;
3761 ret_buf += pre_len;
3762 len32 -= pre_len;
3763 }
3764 if (len32 & 3) {
3765 extra = 4 - (len32 & 3);
3766 len32 = (len32 + 4) & ~3;
3767 }
3768
3769 if (len32 == 4) {
3770 u8 buf[4];
3771
3772 if (cmd_flags)
3773 cmd_flags = BNX2_NVM_COMMAND_LAST;
3774 else
3775 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3776 BNX2_NVM_COMMAND_LAST;
3777
3778 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3779
3780 memcpy(ret_buf, buf, 4 - extra);
3781 }
3782 else if (len32 > 0) {
3783 u8 buf[4];
3784
3785 /* Read the first word. */
3786 if (cmd_flags)
3787 cmd_flags = 0;
3788 else
3789 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3790
3791 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3792
3793 /* Advance to the next dword. */
3794 offset32 += 4;
3795 ret_buf += 4;
3796 len32 -= 4;
3797
3798 while (len32 > 4 && rc == 0) {
3799 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3800
3801 /* Advance to the next dword. */
3802 offset32 += 4;
3803 ret_buf += 4;
3804 len32 -= 4;
3805 }
3806
3807 if (rc)
3808 return rc;
3809
3810 cmd_flags = BNX2_NVM_COMMAND_LAST;
3811 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3812
3813 memcpy(ret_buf, buf, 4 - extra);
3814 }
3815
3816 /* Disable access to flash interface */
3817 bnx2_disable_nvram_access(bp);
3818
3819 bnx2_release_nvram_lock(bp);
3820
3821 return rc;
3822}
3823
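/* Illustrative sketch (not part of the driver): a simplified version of
 * the head/tail alignment math in bnx2_nvram_read() above.  Because the
 * flash is always read as whole 32-bit words at word-aligned offsets, an
 * unaligned request is widened to the surrounding word boundaries and the
 * caller copies out only the bytes it asked for.  The struct and function
 * names are hypothetical.
 */
struct nvram_span_example {
	u32 offset32;	/* word-aligned start of the raw read */
	u32 len32;	/* word-aligned length of the raw read */
	u32 pre_skip;	/* leading bytes to discard from the first word */
	u32 post_skip;	/* trailing bytes to discard from the last word */
};

static void nvram_align_span_example(u32 offset, u32 len,
				     struct nvram_span_example *span)
{
	span->pre_skip = offset & 3;
	span->offset32 = offset & ~3;
	span->len32 = len + span->pre_skip;
	span->post_skip = (4 - (span->len32 & 3)) & 3;
	span->len32 += span->post_skip;
}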
3824static int
3825bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3826 int buf_size)
3827{
3828 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003829 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003830 int rc = 0;
3831 int align_start, align_end;
3832
3833 buf = data_buf;
3834 offset32 = offset;
3835 len32 = buf_size;
3836 align_start = align_end = 0;
3837
3838 if ((align_start = (offset32 & 3))) {
3839 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003840 len32 += align_start;
3841 if (len32 < 4)
3842 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003843 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3844 return rc;
3845 }
3846
3847 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003848 align_end = 4 - (len32 & 3);
3849 len32 += align_end;
3850 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3851 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003852 }
3853
3854 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003855 align_buf = kmalloc(len32, GFP_KERNEL);
3856 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003857 return -ENOMEM;
3858 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003859 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003860 }
3861 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003862 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003863 }
Michael Chane6be7632007-01-08 19:56:13 -08003864 memcpy(align_buf + align_start, data_buf, buf_size);
3865 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003866 }
3867
Michael Chane30372c2007-07-16 18:26:23 -07003868 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanae181bc2006-05-22 16:39:20 -07003869 flash_buffer = kmalloc(264, GFP_KERNEL);
3870 if (flash_buffer == NULL) {
3871 rc = -ENOMEM;
3872 goto nvram_write_end;
3873 }
3874 }
3875
Michael Chanb6016b72005-05-26 13:03:09 -07003876 written = 0;
3877 while ((written < len32) && (rc == 0)) {
3878 u32 page_start, page_end, data_start, data_end;
3879 u32 addr, cmd_flags;
3880 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003881
3882 /* Find the page_start addr */
3883 page_start = offset32 + written;
3884 page_start -= (page_start % bp->flash_info->page_size);
3885 /* Find the page_end addr */
3886 page_end = page_start + bp->flash_info->page_size;
3887 /* Find the data_start addr */
3888 data_start = (written == 0) ? offset32 : page_start;
3889 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003890 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003891 (offset32 + len32) : page_end;
3892
3893 /* Request access to the flash interface. */
3894 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3895 goto nvram_write_end;
3896
3897 /* Enable access to flash interface */
3898 bnx2_enable_nvram_access(bp);
3899
3900 cmd_flags = BNX2_NVM_COMMAND_FIRST;
Michael Chane30372c2007-07-16 18:26:23 -07003901 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003902 int j;
3903
3904 /* Read the whole page into the buffer
3905 * (non-buffered flash only) */
3906 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3907 if (j == (bp->flash_info->page_size - 4)) {
3908 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3909 }
3910 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003911 page_start + j,
3912 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003913 cmd_flags);
3914
3915 if (rc)
3916 goto nvram_write_end;
3917
3918 cmd_flags = 0;
3919 }
3920 }
3921
3922 /* Enable writes to flash interface (unlock write-protect) */
3923 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3924 goto nvram_write_end;
3925
Michael Chanb6016b72005-05-26 13:03:09 -07003926 /* Loop to write back the buffer data from page_start to
3927 * data_start */
3928 i = 0;
Michael Chane30372c2007-07-16 18:26:23 -07003929 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanc8738792007-03-30 14:53:06 -07003930 /* Erase the page */
3931 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3932 goto nvram_write_end;
3933
3934 /* Re-enable the write again for the actual write */
3935 bnx2_enable_nvram_write(bp);
3936
Michael Chanb6016b72005-05-26 13:03:09 -07003937 for (addr = page_start; addr < data_start;
3938 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003939
Michael Chanb6016b72005-05-26 13:03:09 -07003940 rc = bnx2_nvram_write_dword(bp, addr,
3941 &flash_buffer[i], cmd_flags);
3942
3943 if (rc != 0)
3944 goto nvram_write_end;
3945
3946 cmd_flags = 0;
3947 }
3948 }
3949
3950 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003951 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003952 if ((addr == page_end - 4) ||
Michael Chane30372c2007-07-16 18:26:23 -07003953 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
Michael Chanb6016b72005-05-26 13:03:09 -07003954 (addr == data_end - 4))) {
3955
3956 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3957 }
3958 rc = bnx2_nvram_write_dword(bp, addr, buf,
3959 cmd_flags);
3960
3961 if (rc != 0)
3962 goto nvram_write_end;
3963
3964 cmd_flags = 0;
3965 buf += 4;
3966 }
3967
3968 /* Loop to write back the buffer data from data_end
3969 * to page_end */
Michael Chane30372c2007-07-16 18:26:23 -07003970 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003971 for (addr = data_end; addr < page_end;
3972 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003973
Michael Chanb6016b72005-05-26 13:03:09 -07003974 if (addr == page_end-4) {
3975 cmd_flags = BNX2_NVM_COMMAND_LAST;
3976 }
3977 rc = bnx2_nvram_write_dword(bp, addr,
3978 &flash_buffer[i], cmd_flags);
3979
3980 if (rc != 0)
3981 goto nvram_write_end;
3982
3983 cmd_flags = 0;
3984 }
3985 }
3986
3987 /* Disable writes to flash interface (lock write-protect) */
3988 bnx2_disable_nvram_write(bp);
3989
3990 /* Disable access to flash interface */
3991 bnx2_disable_nvram_access(bp);
3992 bnx2_release_nvram_lock(bp);
3993
3994 /* Increment written */
3995 written += data_end - data_start;
3996 }
3997
3998nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003999 kfree(flash_buffer);
4000 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07004001 return rc;
4002}
4003
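/* Illustrative sketch (not part of the driver): the read-modify-write idea
 * behind the align_start/align_end handling in bnx2_nvram_write() above.
 * When the caller's data does not start or end on a 32-bit boundary, the
 * flash words surrounding it are read first and merged with the payload
 * into a word-aligned staging buffer before the write-back.  All names
 * here are hypothetical.
 */
static void merge_unaligned_write_example(u8 *staging, u32 staging_len,
					  const u8 *head_word,
					  const u8 *tail_word,
					  const u8 *data, u32 data_len,
					  u32 align_start)
{
	if (align_start)
		memcpy(staging, head_word, 4);	/* preserve leading bytes */
	if (staging_len > align_start + data_len)
		memcpy(staging + staging_len - 4, tail_word, 4); /* trailing */
	memcpy(staging + align_start, data, data_len);	/* caller's payload */
}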
Michael Chan0d8a6572007-07-07 22:49:43 -07004004static void
4005bnx2_init_remote_phy(struct bnx2 *bp)
4006{
4007 u32 val;
4008
4009 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4010 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4011 return;
4012
4013 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4014 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4015 return;
4016
4017 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
Michael Chan0d8a6572007-07-07 22:49:43 -07004018 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4019
4020 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4021 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4022 bp->phy_port = PORT_FIBRE;
4023 else
4024 bp->phy_port = PORT_TP;
Michael Chan489310a2007-10-10 16:16:31 -07004025
4026 if (netif_running(bp->dev)) {
4027 u32 sig;
4028
4029 if (val & BNX2_LINK_STATUS_LINK_UP) {
4030 bp->link_up = 1;
4031 netif_carrier_on(bp->dev);
4032 } else {
4033 bp->link_up = 0;
4034 netif_carrier_off(bp->dev);
4035 }
4036 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4037 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4038 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4039 sig);
4040 }
Michael Chan0d8a6572007-07-07 22:49:43 -07004041 }
4042}
4043
Michael Chanb6016b72005-05-26 13:03:09 -07004044static int
4045bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4046{
4047 u32 val;
4048 int i, rc = 0;
Michael Chan489310a2007-10-10 16:16:31 -07004049 u8 old_port;
Michael Chanb6016b72005-05-26 13:03:09 -07004050
4051 /* Wait for the current PCI transaction to complete before
4052 * issuing a reset. */
4053 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4054 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4055 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4056 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4057 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4058 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4059 udelay(5);
4060
Michael Chanb090ae22006-01-23 16:07:10 -08004061 /* Wait for the firmware to tell us it is ok to issue a reset. */
4062 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4063
Michael Chanb6016b72005-05-26 13:03:09 -07004064 /* Deposit a driver reset signature so the firmware knows that
4065 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08004066 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07004067 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4068
Michael Chanb6016b72005-05-26 13:03:09 -07004069 /* Do a dummy read to force the chip to complete all current transactions
4070 * before we issue a reset. */
4071 val = REG_RD(bp, BNX2_MISC_ID);
4072
Michael Chan234754d2006-11-19 14:11:41 -08004073 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4074 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4075 REG_RD(bp, BNX2_MISC_COMMAND);
4076 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07004077
Michael Chan234754d2006-11-19 14:11:41 -08004078 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4079 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07004080
Michael Chan234754d2006-11-19 14:11:41 -08004081 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004082
Michael Chan234754d2006-11-19 14:11:41 -08004083 } else {
4084 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4085 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4086 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4087
4088 /* Chip reset. */
4089 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4090
Michael Chan594a9df2007-08-28 15:39:42 -07004091 /* Reading back any register after chip reset will hang the
4092 * bus on 5706 A0 and A1. The msleep below provides plenty
4093 * of margin for write posting.
4094 */
Michael Chan234754d2006-11-19 14:11:41 -08004095 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
Arjan van de Ven8e545882007-08-28 14:34:43 -07004096 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4097 msleep(20);
Michael Chanb6016b72005-05-26 13:03:09 -07004098
Michael Chan234754d2006-11-19 14:11:41 -08004099 /* Reset takes approximately 30 usec */
4100 for (i = 0; i < 10; i++) {
4101 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4102 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4103 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4104 break;
4105 udelay(10);
4106 }
4107
4108 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4109 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4110 printk(KERN_ERR PFX "Chip reset did not complete\n");
4111 return -EBUSY;
4112 }
Michael Chanb6016b72005-05-26 13:03:09 -07004113 }
4114
4115 /* Make sure byte swapping is properly configured. */
4116 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4117 if (val != 0x01020304) {
4118 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4119 return -ENODEV;
4120 }
4121
Michael Chanb6016b72005-05-26 13:03:09 -07004122 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08004123 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4124 if (rc)
4125 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004126
Michael Chan0d8a6572007-07-07 22:49:43 -07004127 spin_lock_bh(&bp->phy_lock);
Michael Chan489310a2007-10-10 16:16:31 -07004128 old_port = bp->phy_port;
Michael Chan0d8a6572007-07-07 22:49:43 -07004129 bnx2_init_remote_phy(bp);
Michael Chan489310a2007-10-10 16:16:31 -07004130 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
Michael Chan0d8a6572007-07-07 22:49:43 -07004131 bnx2_set_default_remote_link(bp);
4132 spin_unlock_bh(&bp->phy_lock);
4133
Michael Chanb6016b72005-05-26 13:03:09 -07004134 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4135 /* Adjust the voltage regulator two steps lower. The default
4136 * of this register is 0x0000000e. */
4137 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4138
4139 /* Remove bad rbuf memory from the free pool. */
4140 rc = bnx2_alloc_bad_rbuf(bp);
4141 }
4142
4143 return rc;
4144}
4145
4146static int
4147bnx2_init_chip(struct bnx2 *bp)
4148{
4149 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08004150 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004151
4152 /* Make sure the interrupt is not active. */
4153 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4154
4155 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4156 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4157#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004158 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004159#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004160 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004161 DMA_READ_CHANS << 12 |
4162 DMA_WRITE_CHANS << 16;
4163
4164 val |= (0x2 << 20) | (1 << 11);
4165
Michael Chandda1e392006-01-23 16:08:14 -08004166 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07004167 val |= (1 << 23);
4168
4169 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4170 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4171 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4172
4173 REG_WR(bp, BNX2_DMA_CONFIG, val);
4174
4175 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4176 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4177 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4178 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4179 }
4180
4181 if (bp->flags & PCIX_FLAG) {
4182 u16 val16;
4183
4184 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4185 &val16);
4186 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4187 val16 & ~PCI_X_CMD_ERO);
4188 }
4189
4190 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4191 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4192 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4193 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4194
4195 /* Initialize context mapping and zero out the quick contexts. The
4196 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07004197 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4198 rc = bnx2_init_5709_context(bp);
4199 if (rc)
4200 return rc;
4201 } else
Michael Chan59b47d82006-11-19 14:10:45 -08004202 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004203
Michael Chanfba9fe92006-06-12 22:21:25 -07004204 if ((rc = bnx2_init_cpus(bp)) != 0)
4205 return rc;
4206
Michael Chanb6016b72005-05-26 13:03:09 -07004207 bnx2_init_nvram(bp);
4208
4209 bnx2_set_mac_addr(bp);
4210
4211 val = REG_RD(bp, BNX2_MQ_CONFIG);
4212 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4213 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004214 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4215 val |= BNX2_MQ_CONFIG_HALT_DIS;
4216
Michael Chanb6016b72005-05-26 13:03:09 -07004217 REG_WR(bp, BNX2_MQ_CONFIG, val);
4218
4219 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4220 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4221 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4222
4223 val = (BCM_PAGE_BITS - 8) << 24;
4224 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4225
4226 /* Configure page size. */
4227 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4228 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4229 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4230 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4231
4232 val = bp->mac_addr[0] +
4233 (bp->mac_addr[1] << 8) +
4234 (bp->mac_addr[2] << 16) +
4235 bp->mac_addr[3] +
4236 (bp->mac_addr[4] << 8) +
4237 (bp->mac_addr[5] << 16);
4238 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4239
4240 /* Program the MTU. Also include 4 bytes for CRC32. */
4241 val = bp->dev->mtu + ETH_HLEN + 4;
4242 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4243 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4244 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4245
4246 bp->last_status_idx = 0;
4247 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4248
4249 /* Set up how to generate a link change interrupt. */
4250 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4251
4252 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4253 (u64) bp->status_blk_mapping & 0xffffffff);
4254 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4255
4256 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4257 (u64) bp->stats_blk_mapping & 0xffffffff);
4258 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4259 (u64) bp->stats_blk_mapping >> 32);
4260
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004261 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004262 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4263
4264 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4265 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4266
4267 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4268 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4269
4270 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4271
4272 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4273
4274 REG_WR(bp, BNX2_HC_COM_TICKS,
4275 (bp->com_ticks_int << 16) | bp->com_ticks);
4276
4277 REG_WR(bp, BNX2_HC_CMD_TICKS,
4278 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4279
Michael Chan02537b062007-06-04 21:24:07 -07004280 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4281 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4282 else
Michael Chan7ea69202007-07-16 18:27:10 -07004283 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
Michael Chanb6016b72005-05-26 13:03:09 -07004284 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4285
4286 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004287 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004288 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004289 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4290 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004291 }
4292
Michael Chan8e6a72c2007-05-03 13:24:48 -07004293 if (bp->flags & ONE_SHOT_MSI_FLAG)
4294 val |= BNX2_HC_CONFIG_ONE_SHOT;
4295
4296 REG_WR(bp, BNX2_HC_CONFIG, val);
4297
Michael Chanb6016b72005-05-26 13:03:09 -07004298 /* Clear internal stats counters. */
4299 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4300
Michael Chanda3e4fb2007-05-03 13:24:23 -07004301 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004302
4303 /* Initialize the receive filter. */
4304 bnx2_set_rx_mode(bp->dev);
4305
Michael Chan0aa38df2007-06-04 21:23:06 -07004306 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4307 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4308 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4309 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4310 }
Michael Chanb090ae22006-01-23 16:07:10 -08004311 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4312 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004313
Michael Chandf149d72007-07-07 22:51:36 -07004314 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
Michael Chanb6016b72005-05-26 13:03:09 -07004315 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4316
4317 udelay(20);
4318
Michael Chanbf5295b2006-03-23 01:11:56 -08004319 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4320
Michael Chanb090ae22006-01-23 16:07:10 -08004321 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004322}
4323
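/* Illustrative sketch (not part of the driver): the arithmetic used in
 * bnx2_init_chip() above to derive the EMAC backoff seed from the station
 * MAC address -- the six address bytes are folded into two 24-bit groups
 * and summed.  The helper name is hypothetical.
 */
static u32 emac_backoff_seed_example(const u8 *mac)
{
	return (mac[0] + (mac[1] << 8) + (mac[2] << 16)) +
	       (mac[3] + (mac[4] << 8) + (mac[5] << 16));
}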
Michael Chan59b47d82006-11-19 14:10:45 -08004324static void
4325bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4326{
4327 u32 val, offset0, offset1, offset2, offset3;
4328
4329 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4330 offset0 = BNX2_L2CTX_TYPE_XI;
4331 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4332 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4333 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4334 } else {
4335 offset0 = BNX2_L2CTX_TYPE;
4336 offset1 = BNX2_L2CTX_CMD_TYPE;
4337 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4338 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4339 }
4340 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4341 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4342
4343 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4344 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4345
4346 val = (u64) bp->tx_desc_mapping >> 32;
4347 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4348
4349 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4350 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4351}
Michael Chanb6016b72005-05-26 13:03:09 -07004352
4353static void
4354bnx2_init_tx_ring(struct bnx2 *bp)
4355{
4356 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004357 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07004358
Michael Chan2f8af122006-08-15 01:39:10 -07004359 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4360
Michael Chanb6016b72005-05-26 13:03:09 -07004361 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004362
Michael Chanb6016b72005-05-26 13:03:09 -07004363 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4364 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4365
4366 bp->tx_prod = 0;
4367 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004368 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004369 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004370
Michael Chan59b47d82006-11-19 14:10:45 -08004371 cid = TX_CID;
4372 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4373 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004374
Michael Chan59b47d82006-11-19 14:10:45 -08004375 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004376}
4377
4378static void
Michael Chan5d5d0012007-12-12 11:17:43 -08004379bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4380 int num_rings)
Michael Chanb6016b72005-05-26 13:03:09 -07004381{
Michael Chanb6016b72005-05-26 13:03:09 -07004382 int i;
Michael Chan5d5d0012007-12-12 11:17:43 -08004383 struct rx_bd *rxbd;
Michael Chanb6016b72005-05-26 13:03:09 -07004384
Michael Chan5d5d0012007-12-12 11:17:43 -08004385 for (i = 0; i < num_rings; i++) {
Michael Chan13daffa2006-03-20 17:49:20 -08004386 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004387
Michael Chan5d5d0012007-12-12 11:17:43 -08004388 rxbd = &rx_ring[i][0];
Michael Chan13daffa2006-03-20 17:49:20 -08004389 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
Michael Chan5d5d0012007-12-12 11:17:43 -08004390 rxbd->rx_bd_len = buf_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004391 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4392 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004393 if (i == (num_rings - 1))
Michael Chan13daffa2006-03-20 17:49:20 -08004394 j = 0;
4395 else
4396 j = i + 1;
Michael Chan5d5d0012007-12-12 11:17:43 -08004397 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4398 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
Michael Chan13daffa2006-03-20 17:49:20 -08004399 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004400}
4401
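/* Illustrative sketch (not part of the driver): the page chaining performed
 * by bnx2_init_rxbd_rings() above.  The last descriptor of each ring page
 * is not used for a buffer; it carries the DMA address of the next page,
 * and the final page points back to the first so the hardware can walk the
 * ring as one circular chain.  The arrays and helper name are hypothetical.
 */
static void chain_ring_pages_example(dma_addr_t *page_dma, u32 *next_hi,
				     u32 *next_lo, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		int next = (i == num_pages - 1) ? 0 : i + 1;

		next_hi[i] = (u64) page_dma[next] >> 32;
		next_lo[i] = (u64) page_dma[next] & 0xffffffff;
	}
}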
4402static void
4403bnx2_init_rx_ring(struct bnx2 *bp)
4404{
4405 int i;
4406 u16 prod, ring_prod;
4407 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4408
4409 bp->rx_prod = 0;
4410 bp->rx_cons = 0;
4411 bp->rx_prod_bseq = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08004412 bp->rx_pg_prod = 0;
4413 bp->rx_pg_cons = 0;
Michael Chan5d5d0012007-12-12 11:17:43 -08004414
4415 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4416 bp->rx_buf_use_size, bp->rx_max_ring);
4417
4418 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
Michael Chan47bf4242007-12-12 11:19:12 -08004419 if (bp->rx_pg_ring_size) {
4420 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4421 bp->rx_pg_desc_mapping,
4422 PAGE_SIZE, bp->rx_max_pg_ring);
4423 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4424 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4425 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4426 BNX2_L2CTX_RBDC_JUMBO_KEY);
4427
4428 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4429 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4430
4431 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4432 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4433
4434 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4435 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4436 }
Michael Chanb6016b72005-05-26 13:03:09 -07004437
4438 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4439 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4440 val |= 0x02 << 8;
Michael Chan5d5d0012007-12-12 11:17:43 -08004441 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004442
Michael Chan13daffa2006-03-20 17:49:20 -08004443 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chan5d5d0012007-12-12 11:17:43 -08004444 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004445
Michael Chan13daffa2006-03-20 17:49:20 -08004446 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chan5d5d0012007-12-12 11:17:43 -08004447 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004448
Michael Chan47bf4242007-12-12 11:19:12 -08004449 ring_prod = prod = bp->rx_pg_prod;
4450 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4451 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4452 break;
4453 prod = NEXT_RX_BD(prod);
4454 ring_prod = RX_PG_RING_IDX(prod);
4455 }
4456 bp->rx_pg_prod = prod;
4457
Michael Chan5d5d0012007-12-12 11:17:43 -08004458 ring_prod = prod = bp->rx_prod;
Michael Chan236b6392006-03-20 17:49:02 -08004459 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004460 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4461 break;
4462 }
4463 prod = NEXT_RX_BD(prod);
4464 ring_prod = RX_RING_IDX(prod);
4465 }
4466 bp->rx_prod = prod;
4467
Michael Chan47bf4242007-12-12 11:19:12 -08004468 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
Michael Chanb6016b72005-05-26 13:03:09 -07004469 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4470
4471 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4472}
4473
Michael Chan5d5d0012007-12-12 11:17:43 -08004474static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
Michael Chan13daffa2006-03-20 17:49:20 -08004475{
Michael Chan5d5d0012007-12-12 11:17:43 -08004476 u32 max, num_rings = 1;
Michael Chan13daffa2006-03-20 17:49:20 -08004477
Michael Chan5d5d0012007-12-12 11:17:43 -08004478 while (ring_size > MAX_RX_DESC_CNT) {
4479 ring_size -= MAX_RX_DESC_CNT;
Michael Chan13daffa2006-03-20 17:49:20 -08004480 num_rings++;
4481 }
4482 /* round to next power of 2 */
Michael Chan5d5d0012007-12-12 11:17:43 -08004483 max = max_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004484 while ((max & num_rings) == 0)
4485 max >>= 1;
4486
4487 if (num_rings != max)
4488 max <<= 1;
4489
Michael Chan5d5d0012007-12-12 11:17:43 -08004490 return max;
4491}
4492
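/* Illustrative sketch (not part of the driver): for the usual case where
 * the required page count does not exceed max_size, bnx2_find_max_ring()
 * above returns the number of ring pages rounded up to the next power of
 * two.  A more direct (uncapped) equivalent of that rounding step:
 */
static u32 round_up_pow2_example(u32 num_pages)
{
	u32 pages = 1;

	while (pages < num_pages)
		pages <<= 1;		/* next power of two */
	return pages;
}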
4493static void
4494bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4495{
4496 u32 rx_size;
4497
4498 /* 8 for CRC and VLAN */
4499 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4500
4501 bp->rx_copy_thresh = RX_COPY_THRESH;
Michael Chan47bf4242007-12-12 11:19:12 -08004502 bp->rx_pg_ring_size = 0;
4503 bp->rx_max_pg_ring = 0;
4504 bp->rx_max_pg_ring_idx = 0;
Michael Chan5d5d0012007-12-12 11:17:43 -08004505
4506 bp->rx_buf_use_size = rx_size;
4507 /* hw alignment */
4508 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chan1db82f22007-12-12 11:19:35 -08004509 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
Michael Chan5d5d0012007-12-12 11:17:43 -08004510 bp->rx_ring_size = size;
4511 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
Michael Chan13daffa2006-03-20 17:49:20 -08004512 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4513}
4514
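/* Illustrative sketch (not part of the driver): the buffer sizing done in
 * bnx2_set_rx_ring_size() above.  Each receive buffer must hold an
 * MTU-sized payload plus the Ethernet header, the driver's rx_offset
 * headroom and 8 extra bytes for the frame CRC and an optional VLAN tag;
 * the allocation is then padded by BNX2_RX_ALIGN for hardware alignment.
 * The helper name and out-parameter are hypothetical.
 */
static u32 rx_buf_sizes_example(u32 mtu, u32 rx_offset, u32 *alloc_size)
{
	u32 use_size = mtu + ETH_HLEN + rx_offset + 8;	/* bytes HW may DMA */

	*alloc_size = use_size + BNX2_RX_ALIGN;		/* allocated size */
	return use_size;
}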
4515static void
Michael Chanb6016b72005-05-26 13:03:09 -07004516bnx2_free_tx_skbs(struct bnx2 *bp)
4517{
4518 int i;
4519
4520 if (bp->tx_buf_ring == NULL)
4521 return;
4522
4523 for (i = 0; i < TX_DESC_CNT; ) {
4524 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4525 struct sk_buff *skb = tx_buf->skb;
4526 int j, last;
4527
4528 if (skb == NULL) {
4529 i++;
4530 continue;
4531 }
4532
4533 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4534 skb_headlen(skb), PCI_DMA_TODEVICE);
4535
4536 tx_buf->skb = NULL;
4537
4538 last = skb_shinfo(skb)->nr_frags;
4539 for (j = 0; j < last; j++) {
4540 tx_buf = &bp->tx_buf_ring[i + j + 1];
4541 pci_unmap_page(bp->pdev,
4542 pci_unmap_addr(tx_buf, mapping),
4543 skb_shinfo(skb)->frags[j].size,
4544 PCI_DMA_TODEVICE);
4545 }
Michael Chan745720e2006-06-29 12:37:41 -07004546 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004547 i += j + 1;
4548 }
4549
4550}
4551
4552static void
4553bnx2_free_rx_skbs(struct bnx2 *bp)
4554{
4555 int i;
4556
4557 if (bp->rx_buf_ring == NULL)
4558 return;
4559
Michael Chan13daffa2006-03-20 17:49:20 -08004560 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004561 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4562 struct sk_buff *skb = rx_buf->skb;
4563
Michael Chan05d0f1c2005-11-04 08:53:48 -08004564 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004565 continue;
4566
4567 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4568 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4569
4570 rx_buf->skb = NULL;
4571
Michael Chan745720e2006-06-29 12:37:41 -07004572 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004573 }
Michael Chan47bf4242007-12-12 11:19:12 -08004574 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4575 bnx2_free_rx_page(bp, i);
Michael Chanb6016b72005-05-26 13:03:09 -07004576}
4577
4578static void
4579bnx2_free_skbs(struct bnx2 *bp)
4580{
4581 bnx2_free_tx_skbs(bp);
4582 bnx2_free_rx_skbs(bp);
4583}
4584
4585static int
4586bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4587{
4588 int rc;
4589
4590 rc = bnx2_reset_chip(bp, reset_code);
4591 bnx2_free_skbs(bp);
4592 if (rc)
4593 return rc;
4594
Michael Chanfba9fe92006-06-12 22:21:25 -07004595 if ((rc = bnx2_init_chip(bp)) != 0)
4596 return rc;
4597
Michael Chanb6016b72005-05-26 13:03:09 -07004598 bnx2_init_tx_ring(bp);
4599 bnx2_init_rx_ring(bp);
4600 return 0;
4601}
4602
4603static int
4604bnx2_init_nic(struct bnx2 *bp)
4605{
4606 int rc;
4607
4608 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4609 return rc;
4610
Michael Chan80be4432006-11-19 14:07:28 -08004611 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004612 bnx2_init_phy(bp);
4613 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004614 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004615 return 0;
4616}
4617
4618static int
4619bnx2_test_registers(struct bnx2 *bp)
4620{
4621 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004622 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004623 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004624 u16 offset;
4625 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004626#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004627 u32 rw_mask;
4628 u32 ro_mask;
4629 } reg_tbl[] = {
4630 { 0x006c, 0, 0x00000000, 0x0000003f },
4631 { 0x0090, 0, 0xffffffff, 0x00000000 },
4632 { 0x0094, 0, 0x00000000, 0x00000000 },
4633
Michael Chan5bae30c2007-05-03 13:18:46 -07004634 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4635 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4636 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4637 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4638 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4639 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4640 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4641 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4642 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004643
Michael Chan5bae30c2007-05-03 13:18:46 -07004644 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4645 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4646 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4647 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4648 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4649 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004650
Michael Chan5bae30c2007-05-03 13:18:46 -07004651 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4652 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4653 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004654
4655 { 0x1000, 0, 0x00000000, 0x00000001 },
4656 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004657
4658 { 0x1408, 0, 0x01c00800, 0x00000000 },
4659 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4660 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004661 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004662 { 0x14b0, 0, 0x00000002, 0x00000001 },
4663 { 0x14b8, 0, 0x00000000, 0x00000000 },
4664 { 0x14c0, 0, 0x00000000, 0x00000009 },
4665 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4666 { 0x14cc, 0, 0x00000000, 0x00000001 },
4667 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004668
4669 { 0x1800, 0, 0x00000000, 0x00000001 },
4670 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004671
4672 { 0x2800, 0, 0x00000000, 0x00000001 },
4673 { 0x2804, 0, 0x00000000, 0x00003f01 },
4674 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4675 { 0x2810, 0, 0xffff0000, 0x00000000 },
4676 { 0x2814, 0, 0xffff0000, 0x00000000 },
4677 { 0x2818, 0, 0xffff0000, 0x00000000 },
4678 { 0x281c, 0, 0xffff0000, 0x00000000 },
4679 { 0x2834, 0, 0xffffffff, 0x00000000 },
4680 { 0x2840, 0, 0x00000000, 0xffffffff },
4681 { 0x2844, 0, 0x00000000, 0xffffffff },
4682 { 0x2848, 0, 0xffffffff, 0x00000000 },
4683 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4684
4685 { 0x2c00, 0, 0x00000000, 0x00000011 },
4686 { 0x2c04, 0, 0x00000000, 0x00030007 },
4687
Michael Chanb6016b72005-05-26 13:03:09 -07004688 { 0x3c00, 0, 0x00000000, 0x00000001 },
4689 { 0x3c04, 0, 0x00000000, 0x00070000 },
4690 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4691 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4692 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4693 { 0x3c14, 0, 0x00000000, 0xffffffff },
4694 { 0x3c18, 0, 0x00000000, 0xffffffff },
4695 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4696 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004697
4698 { 0x5004, 0, 0x00000000, 0x0000007f },
4699 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004700
Michael Chanb6016b72005-05-26 13:03:09 -07004701 { 0x5c00, 0, 0x00000000, 0x00000001 },
4702 { 0x5c04, 0, 0x00000000, 0x0003000f },
4703 { 0x5c08, 0, 0x00000003, 0x00000000 },
4704 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4705 { 0x5c10, 0, 0x00000000, 0xffffffff },
4706 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4707 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4708 { 0x5c88, 0, 0x00000000, 0x00077373 },
4709 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4710
4711 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4712 { 0x680c, 0, 0xffffffff, 0x00000000 },
4713 { 0x6810, 0, 0xffffffff, 0x00000000 },
4714 { 0x6814, 0, 0xffffffff, 0x00000000 },
4715 { 0x6818, 0, 0xffffffff, 0x00000000 },
4716 { 0x681c, 0, 0xffffffff, 0x00000000 },
4717 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4718 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4719 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4720 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4721 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4722 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4723 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4724 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4725 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4726 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4727 { 0x684c, 0, 0xffffffff, 0x00000000 },
4728 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4729 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4730 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4731 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4732 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4733 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4734
4735 { 0xffff, 0, 0x00000000, 0x00000000 },
4736 };
4737
4738 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004739 is_5709 = 0;
4740 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4741 is_5709 = 1;
4742
Michael Chanb6016b72005-05-26 13:03:09 -07004743 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4744 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004745 u16 flags = reg_tbl[i].flags;
4746
4747 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4748 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004749
4750 offset = (u32) reg_tbl[i].offset;
4751 rw_mask = reg_tbl[i].rw_mask;
4752 ro_mask = reg_tbl[i].ro_mask;
4753
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004754 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004755
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004756 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004757
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004758 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004759 if ((val & rw_mask) != 0) {
4760 goto reg_test_err;
4761 }
4762
4763 if ((val & ro_mask) != (save_val & ro_mask)) {
4764 goto reg_test_err;
4765 }
4766
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004767 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004768
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004769 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004770 if ((val & rw_mask) != rw_mask) {
4771 goto reg_test_err;
4772 }
4773
4774 if ((val & ro_mask) != (save_val & ro_mask)) {
4775 goto reg_test_err;
4776 }
4777
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004778 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004779 continue;
4780
4781reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004782 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004783 ret = -ENODEV;
4784 break;
4785 }
4786 return ret;
4787}
4788
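/* Write each test pattern to every word in the given range through the
 * indirect register interface and read it back to verify.
 */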
4789static int
4790bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4791{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004792 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004793 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4794 int i;
4795
4796 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4797 u32 offset;
4798
4799 for (offset = 0; offset < size; offset += 4) {
4800
4801 REG_WR_IND(bp, start + offset, test_pattern[i]);
4802
4803 if (REG_RD_IND(bp, start + offset) !=
4804 test_pattern[i]) {
4805 return -ENODEV;
4806 }
4807 }
4808 }
4809 return 0;
4810}
4811
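/* Internal memory self-test: run bnx2_do_mem_test() over a per-chip
 * table of on-chip memory regions.
 */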
4812static int
4813bnx2_test_memory(struct bnx2 *bp)
4814{
4815 int ret = 0;
4816 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004817 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004818 u32 offset;
4819 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004820 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004821 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004822 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004823 { 0xe0000, 0x4000 },
4824 { 0x120000, 0x4000 },
4825 { 0x1a0000, 0x4000 },
4826 { 0x160000, 0x4000 },
4827 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004828 },
4829 mem_tbl_5709[] = {
4830 { 0x60000, 0x4000 },
4831 { 0xa0000, 0x3000 },
4832 { 0xe0000, 0x4000 },
4833 { 0x120000, 0x4000 },
4834 { 0x1a0000, 0x4000 },
4835 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004836 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004837 struct mem_entry *mem_tbl;
4838
4839 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4840 mem_tbl = mem_tbl_5709;
4841 else
4842 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004843
4844 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4845 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4846 mem_tbl[i].len)) != 0) {
4847 return ret;
4848 }
4849 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004850
Michael Chanb6016b72005-05-26 13:03:09 -07004851 return ret;
4852}
4853
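/* Loopback self-test: put the MAC or PHY in loopback mode, transmit a
 * single 1514-byte frame, force the host coalescing block to update the
 * status block, and verify that the frame was received intact (no error
 * flags in the l2_fhdr, correct length and payload).
 */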
Michael Chanbc5a0692006-01-23 16:13:22 -08004854#define BNX2_MAC_LOOPBACK 0
4855#define BNX2_PHY_LOOPBACK 1
4856
Michael Chanb6016b72005-05-26 13:03:09 -07004857static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004858bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004859{
4860 unsigned int pkt_size, num_pkts, i;
4861 struct sk_buff *skb, *rx_skb;
4862 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004863 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004864 dma_addr_t map;
4865 struct tx_bd *txbd;
4866 struct sw_bd *rx_buf;
4867 struct l2_fhdr *rx_hdr;
4868 int ret = -ENODEV;
4869
Michael Chanbc5a0692006-01-23 16:13:22 -08004870 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4871 bp->loopback = MAC_LOOPBACK;
4872 bnx2_set_mac_loopback(bp);
4873 }
4874 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan489310a2007-10-10 16:16:31 -07004875 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4876 return 0;
4877
Michael Chan80be4432006-11-19 14:07:28 -08004878 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004879 bnx2_set_phy_loopback(bp);
4880 }
4881 else
4882 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004883
4884 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004885 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004886 if (!skb)
4887 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004888 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004889 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004890 memset(packet + 6, 0x0, 8);
4891 for (i = 14; i < pkt_size; i++)
4892 packet[i] = (unsigned char) (i & 0xff);
4893
4894 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4895 PCI_DMA_TODEVICE);
4896
Michael Chanbf5295b2006-03-23 01:11:56 -08004897 REG_WR(bp, BNX2_HC_COMMAND,
4898 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4899
Michael Chanb6016b72005-05-26 13:03:09 -07004900 REG_RD(bp, BNX2_HC_COMMAND);
4901
4902 udelay(5);
4903 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4904
Michael Chanb6016b72005-05-26 13:03:09 -07004905 num_pkts = 0;
4906
Michael Chanbc5a0692006-01-23 16:13:22 -08004907 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004908
4909 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4910 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4911 txbd->tx_bd_mss_nbytes = pkt_size;
4912 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4913
4914 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004915 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4916 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004917
Michael Chan234754d2006-11-19 14:11:41 -08004918 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4919 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004920
4921 udelay(100);
4922
Michael Chanbf5295b2006-03-23 01:11:56 -08004923 REG_WR(bp, BNX2_HC_COMMAND,
4924 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4925
Michael Chanb6016b72005-05-26 13:03:09 -07004926 REG_RD(bp, BNX2_HC_COMMAND);
4927
4928 udelay(5);
4929
4930 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004931 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004932
Michael Chanbc5a0692006-01-23 16:13:22 -08004933 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004934 goto loopback_test_done;
4935 }
4936
4937 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4938 if (rx_idx != rx_start_idx + num_pkts) {
4939 goto loopback_test_done;
4940 }
4941
4942 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4943 rx_skb = rx_buf->skb;
4944
4945 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4946 skb_reserve(rx_skb, bp->rx_offset);
4947
4948 pci_dma_sync_single_for_cpu(bp->pdev,
4949 pci_unmap_addr(rx_buf, mapping),
4950 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4951
Michael Chanade2bfe2006-01-23 16:09:51 -08004952 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004953 (L2_FHDR_ERRORS_BAD_CRC |
4954 L2_FHDR_ERRORS_PHY_DECODE |
4955 L2_FHDR_ERRORS_ALIGNMENT |
4956 L2_FHDR_ERRORS_TOO_SHORT |
4957 L2_FHDR_ERRORS_GIANT_FRAME)) {
4958
4959 goto loopback_test_done;
4960 }
4961
4962 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4963 goto loopback_test_done;
4964 }
4965
4966 for (i = 14; i < pkt_size; i++) {
4967 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4968 goto loopback_test_done;
4969 }
4970 }
4971
4972 ret = 0;
4973
4974loopback_test_done:
4975 bp->loopback = 0;
4976 return ret;
4977}
4978
Michael Chanbc5a0692006-01-23 16:13:22 -08004979#define BNX2_MAC_LOOPBACK_FAILED 1
4980#define BNX2_PHY_LOOPBACK_FAILED 2
4981#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4982 BNX2_PHY_LOOPBACK_FAILED)
4983
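/* Run the MAC and PHY loopback tests after a chip reset and return a
 * bitmask of the modes that failed.
 */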
4984static int
4985bnx2_test_loopback(struct bnx2 *bp)
4986{
4987 int rc = 0;
4988
4989 if (!netif_running(bp->dev))
4990 return BNX2_LOOPBACK_FAILED;
4991
4992 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4993 spin_lock_bh(&bp->phy_lock);
4994 bnx2_init_phy(bp);
4995 spin_unlock_bh(&bp->phy_lock);
4996 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4997 rc |= BNX2_MAC_LOOPBACK_FAILED;
4998 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4999 rc |= BNX2_PHY_LOOPBACK_FAILED;
5000 return rc;
5001}
5002
Michael Chanb6016b72005-05-26 13:03:09 -07005003#define NVRAM_SIZE 0x200
5004#define CRC32_RESIDUAL 0xdebb20e3
5005
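/* NVRAM self-test: check the magic word at offset 0, then verify that
 * each of the two 0x100-byte blocks starting at offset 0x100 passes the
 * CRC32 residual check.
 */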
5006static int
5007bnx2_test_nvram(struct bnx2 *bp)
5008{
5009 u32 buf[NVRAM_SIZE / 4];
5010 u8 *data = (u8 *) buf;
5011 int rc = 0;
5012 u32 magic, csum;
5013
5014 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5015 goto test_nvram_done;
5016
5017 magic = be32_to_cpu(buf[0]);
5018 if (magic != 0x669955aa) {
5019 rc = -ENODEV;
5020 goto test_nvram_done;
5021 }
5022
5023 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5024 goto test_nvram_done;
5025
5026 csum = ether_crc_le(0x100, data);
5027 if (csum != CRC32_RESIDUAL) {
5028 rc = -ENODEV;
5029 goto test_nvram_done;
5030 }
5031
5032 csum = ether_crc_le(0x100, data + 0x100);
5033 if (csum != CRC32_RESIDUAL) {
5034 rc = -ENODEV;
5035 }
5036
5037test_nvram_done:
5038 return rc;
5039}
5040
5041static int
5042bnx2_test_link(struct bnx2 *bp)
5043{
5044 u32 bmsr;
5045
Michael Chan489310a2007-10-10 16:16:31 -07005046 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5047 if (bp->link_up)
5048 return 0;
5049 return -ENODEV;
5050 }
Michael Chanc770a652005-08-25 15:38:39 -07005051 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07005052 bnx2_enable_bmsr1(bp);
5053 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5054 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5055 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07005056 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005057
Michael Chanb6016b72005-05-26 13:03:09 -07005058 if (bmsr & BMSR_LSTATUS) {
5059 return 0;
5060 }
5061 return -ENODEV;
5062}
5063
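/* Interrupt self-test: force an interrupt with the COAL_NOW bit of the
 * host coalescing command register and poll for up to ~100 ms for the
 * status block index to advance.
 */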
5064static int
5065bnx2_test_intr(struct bnx2 *bp)
5066{
5067 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07005068 u16 status_idx;
5069
5070 if (!netif_running(bp->dev))
5071 return -ENODEV;
5072
5073 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5074
5075 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08005076 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07005077 REG_RD(bp, BNX2_HC_COMMAND);
5078
5079 for (i = 0; i < 10; i++) {
5080 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5081 status_idx) {
5082
5083 break;
5084 }
5085
5086 msleep_interruptible(10);
5087 }
5088 if (i < 10)
5089 return 0;
5090
5091 return -ENODEV;
5092}
5093
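/* 5706 SerDes link polling.  If autonegotiation has not completed, use
 * parallel detection (signal detect without a received config word) to
 * force 1000 Mb/s full duplex; once the partner's config word appears,
 * switch back to autonegotiation.
 */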
5094static void
Michael Chan48b01e22006-11-19 14:08:00 -08005095bnx2_5706_serdes_timer(struct bnx2 *bp)
5096{
5097 spin_lock(&bp->phy_lock);
5098 if (bp->serdes_an_pending)
5099 bp->serdes_an_pending--;
5100 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5101 u32 bmcr;
5102
5103 bp->current_interval = bp->timer_interval;
5104
Michael Chanca58c3a2007-05-03 13:22:52 -07005105 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005106
5107 if (bmcr & BMCR_ANENABLE) {
5108 u32 phy1, phy2;
5109
5110 bnx2_write_phy(bp, 0x1c, 0x7c00);
5111 bnx2_read_phy(bp, 0x1c, &phy1);
5112
5113 bnx2_write_phy(bp, 0x17, 0x0f01);
5114 bnx2_read_phy(bp, 0x15, &phy2);
5115 bnx2_write_phy(bp, 0x17, 0x0f01);
5116 bnx2_read_phy(bp, 0x15, &phy2);
5117
5118 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5119 !(phy2 & 0x20)) { /* no CONFIG */
5120
5121 bmcr &= ~BMCR_ANENABLE;
5122 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07005123 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005124 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5125 }
5126 }
5127 }
5128 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5129 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5130 u32 phy2;
5131
5132 bnx2_write_phy(bp, 0x17, 0x0f01);
5133 bnx2_read_phy(bp, 0x15, &phy2);
5134 if (phy2 & 0x20) {
5135 u32 bmcr;
5136
Michael Chanca58c3a2007-05-03 13:22:52 -07005137 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005138 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07005139 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005140
5141 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5142 }
5143 } else
5144 bp->current_interval = bp->timer_interval;
5145
5146 spin_unlock(&bp->phy_lock);
5147}
5148
5149static void
Michael Chanf8dd0642006-11-19 14:08:29 -08005150bnx2_5708_serdes_timer(struct bnx2 *bp)
5151{
Michael Chan0d8a6572007-07-07 22:49:43 -07005152 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5153 return;
5154
Michael Chanf8dd0642006-11-19 14:08:29 -08005155 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5156 bp->serdes_an_pending = 0;
5157 return;
5158 }
5159
5160 spin_lock(&bp->phy_lock);
5161 if (bp->serdes_an_pending)
5162 bp->serdes_an_pending--;
5163 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5164 u32 bmcr;
5165
Michael Chanca58c3a2007-05-03 13:22:52 -07005166 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08005167 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07005168 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005169 bp->current_interval = SERDES_FORCED_TIMEOUT;
5170 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07005171 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005172 bp->serdes_an_pending = 2;
5173 bp->current_interval = bp->timer_interval;
5174 }
5175
5176 } else
5177 bp->current_interval = bp->timer_interval;
5178
5179 spin_unlock(&bp->phy_lock);
5180}
5181
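/* Periodic driver timer: send the firmware heartbeat, refresh the
 * firmware RX drop counter, force a statistics update on 5708 to work
 * around corrupted counters, and run the per-chip SerDes state machines.
 */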
5182static void
Michael Chanb6016b72005-05-26 13:03:09 -07005183bnx2_timer(unsigned long data)
5184{
5185 struct bnx2 *bp = (struct bnx2 *) data;
Michael Chanb6016b72005-05-26 13:03:09 -07005186
Michael Chancd339a02005-08-25 15:35:24 -07005187 if (!netif_running(bp->dev))
5188 return;
5189
Michael Chanb6016b72005-05-26 13:03:09 -07005190 if (atomic_read(&bp->intr_sem) != 0)
5191 goto bnx2_restart_timer;
5192
Michael Chandf149d72007-07-07 22:51:36 -07005193 bnx2_send_heart_beat(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005194
Michael Chancea94db2006-06-12 22:16:13 -07005195 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5196
Michael Chan02537b062007-06-04 21:24:07 -07005197 /* work around occasionally corrupted counters */
5198 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5199 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5200 BNX2_HC_COMMAND_STATS_NOW);
5201
Michael Chanf8dd0642006-11-19 14:08:29 -08005202 if (bp->phy_flags & PHY_SERDES_FLAG) {
5203 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5204 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07005205 else
Michael Chanf8dd0642006-11-19 14:08:29 -08005206 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005207 }
5208
5209bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07005210 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005211}
5212
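/* Request the device interrupt: the MSI handler (one-shot variant on
 * 5709) when MSI is enabled, otherwise a shared INTx line.
 */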
Michael Chan8e6a72c2007-05-03 13:24:48 -07005213static int
5214bnx2_request_irq(struct bnx2 *bp)
5215{
5216 struct net_device *dev = bp->dev;
5217 int rc = 0;
5218
5219 if (bp->flags & USING_MSI_FLAG) {
5220 irq_handler_t fn = bnx2_msi;
5221
5222 if (bp->flags & ONE_SHOT_MSI_FLAG)
5223 fn = bnx2_msi_1shot;
5224
5225 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5226 } else
5227 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5228 IRQF_SHARED, dev->name, dev);
5229 return rc;
5230}
5231
5232static void
5233bnx2_free_irq(struct bnx2 *bp)
5234{
5235 struct net_device *dev = bp->dev;
5236
5237 if (bp->flags & USING_MSI_FLAG) {
5238 free_irq(bp->pdev->irq, dev);
5239 pci_disable_msi(bp->pdev);
5240 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5241 } else
5242 free_irq(bp->pdev->irq, dev);
5243}
5244
Michael Chanb6016b72005-05-26 13:03:09 -07005245/* Called with rtnl_lock */
5246static int
5247bnx2_open(struct net_device *dev)
5248{
Michael Chan972ec0d2006-01-23 16:12:43 -08005249 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005250 int rc;
5251
Michael Chan1b2f9222007-05-03 13:20:19 -07005252 netif_carrier_off(dev);
5253
Pavel Machek829ca9a2005-09-03 15:56:56 -07005254 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005255 bnx2_disable_int(bp);
5256
5257 rc = bnx2_alloc_mem(bp);
5258 if (rc)
5259 return rc;
5260
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005261 napi_enable(&bp->napi);
5262
Michael Chan8e6a72c2007-05-03 13:24:48 -07005263 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07005264 if (pci_enable_msi(bp->pdev) == 0) {
5265 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005266 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5267 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005268 }
5269 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07005270 rc = bnx2_request_irq(bp);
5271
Michael Chanb6016b72005-05-26 13:03:09 -07005272 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005273 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005274 bnx2_free_mem(bp);
5275 return rc;
5276 }
5277
5278 rc = bnx2_init_nic(bp);
5279
5280 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005281 napi_disable(&bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005282 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005283 bnx2_free_skbs(bp);
5284 bnx2_free_mem(bp);
5285 return rc;
5286 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005287
Michael Chancd339a02005-08-25 15:35:24 -07005288 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005289
5290 atomic_set(&bp->intr_sem, 0);
5291
5292 bnx2_enable_int(bp);
5293
5294 if (bp->flags & USING_MSI_FLAG) {
 5295 /* Test MSI to make sure it is working.
 5296 * If the MSI test fails, go back to INTx mode.
 5297 */
5298 if (bnx2_test_intr(bp) != 0) {
5299 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5300 " using MSI, switching to INTx mode. Please"
5301 " report this failure to the PCI maintainer"
5302 " and include system chipset information.\n",
5303 bp->dev->name);
5304
5305 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005306 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005307
5308 rc = bnx2_init_nic(bp);
5309
Michael Chan8e6a72c2007-05-03 13:24:48 -07005310 if (!rc)
5311 rc = bnx2_request_irq(bp);
5312
Michael Chanb6016b72005-05-26 13:03:09 -07005313 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005314 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005315 bnx2_free_skbs(bp);
5316 bnx2_free_mem(bp);
5317 del_timer_sync(&bp->timer);
5318 return rc;
5319 }
5320 bnx2_enable_int(bp);
5321 }
5322 }
5323 if (bp->flags & USING_MSI_FLAG) {
5324 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5325 }
5326
5327 netif_start_queue(dev);
5328
5329 return 0;
5330}
5331
5332static void
David Howellsc4028952006-11-22 14:57:56 +00005333bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005334{
David Howellsc4028952006-11-22 14:57:56 +00005335 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005336
Michael Chanafdc08b2005-08-25 15:34:29 -07005337 if (!netif_running(bp->dev))
5338 return;
5339
5340 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005341 bnx2_netif_stop(bp);
5342
5343 bnx2_init_nic(bp);
5344
5345 atomic_set(&bp->intr_sem, 1);
5346 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005347 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005348}
5349
5350static void
5351bnx2_tx_timeout(struct net_device *dev)
5352{
Michael Chan972ec0d2006-01-23 16:12:43 -08005353 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005354
 5355 /* This allows the netif to be shut down gracefully before resetting */
5356 schedule_work(&bp->reset_task);
5357}
5358
5359#ifdef BCM_VLAN
5360/* Called with rtnl_lock */
5361static void
5362bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5363{
Michael Chan972ec0d2006-01-23 16:12:43 -08005364 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005365
5366 bnx2_netif_stop(bp);
5367
5368 bp->vlgrp = vlgrp;
5369 bnx2_set_rx_mode(dev);
5370
5371 bnx2_netif_start(bp);
5372}
Michael Chanb6016b72005-05-26 13:03:09 -07005373#endif
5374
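/* Hard transmit routine: map the linear data and all fragments, build
 * the TX buffer descriptors with the appropriate VLAN, checksum and LSO
 * flags, then ring the doorbell registers.  The queue is stopped when no
 * more than MAX_SKB_FRAGS descriptors remain.
 */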
Herbert Xu932ff272006-06-09 12:20:56 -07005375/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005376 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5377 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005378 */
5379static int
5380bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5381{
Michael Chan972ec0d2006-01-23 16:12:43 -08005382 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005383 dma_addr_t mapping;
5384 struct tx_bd *txbd;
5385 struct sw_bd *tx_buf;
5386 u32 len, vlan_tag_flags, last_frag, mss;
5387 u16 prod, ring_prod;
5388 int i;
5389
Michael Chane89bbf12005-08-25 15:36:58 -07005390 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005391 netif_stop_queue(dev);
5392 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5393 dev->name);
5394
5395 return NETDEV_TX_BUSY;
5396 }
5397 len = skb_headlen(skb);
5398 prod = bp->tx_prod;
5399 ring_prod = TX_RING_IDX(prod);
5400
5401 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005402 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005403 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5404 }
5405
 5406 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5407 vlan_tag_flags |=
5408 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5409 }
Michael Chanfde82052007-05-03 17:23:35 -07005410 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005411 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005412 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005413
Michael Chanb6016b72005-05-26 13:03:09 -07005414 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5415
Michael Chan4666f872007-05-03 13:22:28 -07005416 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005417
Michael Chan4666f872007-05-03 13:22:28 -07005418 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5419 u32 tcp_off = skb_transport_offset(skb) -
5420 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005421
Michael Chan4666f872007-05-03 13:22:28 -07005422 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5423 TX_BD_FLAGS_SW_FLAGS;
5424 if (likely(tcp_off == 0))
5425 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5426 else {
5427 tcp_off >>= 3;
5428 vlan_tag_flags |= ((tcp_off & 0x3) <<
5429 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5430 ((tcp_off & 0x10) <<
5431 TX_BD_FLAGS_TCP6_OFF4_SHL);
5432 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5433 }
5434 } else {
5435 if (skb_header_cloned(skb) &&
5436 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5437 dev_kfree_skb(skb);
5438 return NETDEV_TX_OK;
5439 }
5440
5441 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5442
5443 iph = ip_hdr(skb);
5444 iph->check = 0;
5445 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5446 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5447 iph->daddr, 0,
5448 IPPROTO_TCP,
5449 0);
5450 if (tcp_opt_len || (iph->ihl > 5)) {
5451 vlan_tag_flags |= ((iph->ihl - 5) +
5452 (tcp_opt_len >> 2)) << 8;
5453 }
Michael Chanb6016b72005-05-26 13:03:09 -07005454 }
Michael Chan4666f872007-05-03 13:22:28 -07005455 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005456 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005457
5458 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005459
Michael Chanb6016b72005-05-26 13:03:09 -07005460 tx_buf = &bp->tx_buf_ring[ring_prod];
5461 tx_buf->skb = skb;
5462 pci_unmap_addr_set(tx_buf, mapping, mapping);
5463
5464 txbd = &bp->tx_desc_ring[ring_prod];
5465
5466 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5467 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5468 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5469 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5470
5471 last_frag = skb_shinfo(skb)->nr_frags;
5472
5473 for (i = 0; i < last_frag; i++) {
5474 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5475
5476 prod = NEXT_TX_BD(prod);
5477 ring_prod = TX_RING_IDX(prod);
5478 txbd = &bp->tx_desc_ring[ring_prod];
5479
5480 len = frag->size;
5481 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5482 len, PCI_DMA_TODEVICE);
5483 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5484 mapping, mapping);
5485
5486 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5487 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5488 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5489 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5490
5491 }
5492 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5493
5494 prod = NEXT_TX_BD(prod);
5495 bp->tx_prod_bseq += skb->len;
5496
Michael Chan234754d2006-11-19 14:11:41 -08005497 REG_WR16(bp, bp->tx_bidx_addr, prod);
5498 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005499
5500 mmiowb();
5501
5502 bp->tx_prod = prod;
5503 dev->trans_start = jiffies;
5504
Michael Chane89bbf12005-08-25 15:36:58 -07005505 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005506 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07005507 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005508 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005509 }
5510
5511 return NETDEV_TX_OK;
5512}
5513
5514/* Called with rtnl_lock */
5515static int
5516bnx2_close(struct net_device *dev)
5517{
Michael Chan972ec0d2006-01-23 16:12:43 -08005518 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005519 u32 reset_code;
5520
Michael Chanafdc08b2005-08-25 15:34:29 -07005521 /* Calling flush_scheduled_work() may deadlock because
5522 * linkwatch_event() may be on the workqueue and it will try to get
5523 * the rtnl_lock which we are holding.
5524 */
5525 while (bp->in_reset_task)
5526 msleep(1);
5527
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005528 bnx2_disable_int_sync(bp);
5529 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005530 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005531 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005532 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005533 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005534 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5535 else
5536 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5537 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005538 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005539 bnx2_free_skbs(bp);
5540 bnx2_free_mem(bp);
5541 bp->link_up = 0;
5542 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005543 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005544 return 0;
5545}
5546
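/* Hardware counters are kept as _hi/_lo 32-bit word pairs.  On 64-bit
 * hosts both halves are combined into one value; on 32-bit hosts only
 * the low word is reported.
 */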
 5547#define GET_NET_STATS64(ctr) \
 5548 (((unsigned long) (ctr##_hi) << 32) + \
 5549 (unsigned long) (ctr##_lo))
5550
5551#define GET_NET_STATS32(ctr) \
5552 (ctr##_lo)
5553
5554#if (BITS_PER_LONG == 64)
5555#define GET_NET_STATS GET_NET_STATS64
5556#else
5557#define GET_NET_STATS GET_NET_STATS32
5558#endif
5559
5560static struct net_device_stats *
5561bnx2_get_stats(struct net_device *dev)
5562{
Michael Chan972ec0d2006-01-23 16:12:43 -08005563 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005564 struct statistics_block *stats_blk = bp->stats_blk;
5565 struct net_device_stats *net_stats = &bp->net_stats;
5566
5567 if (bp->stats_blk == NULL) {
5568 return net_stats;
5569 }
5570 net_stats->rx_packets =
5571 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5572 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5573 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5574
5575 net_stats->tx_packets =
5576 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5577 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5578 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5579
5580 net_stats->rx_bytes =
5581 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5582
5583 net_stats->tx_bytes =
5584 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5585
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005586 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005587 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5588
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005589 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005590 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5591
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005592 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005593 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5594 stats_blk->stat_EtherStatsOverrsizePkts);
5595
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005596 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005597 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5598
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005599 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005600 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5601
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005602 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005603 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5604
5605 net_stats->rx_errors = net_stats->rx_length_errors +
5606 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5607 net_stats->rx_crc_errors;
5608
5609 net_stats->tx_aborted_errors =
5610 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5611 stats_blk->stat_Dot3StatsLateCollisions);
5612
Michael Chan5b0c76a2005-11-04 08:45:49 -08005613 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5614 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005615 net_stats->tx_carrier_errors = 0;
5616 else {
5617 net_stats->tx_carrier_errors =
5618 (unsigned long)
5619 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5620 }
5621
5622 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005623 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005624 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5625 +
5626 net_stats->tx_aborted_errors +
5627 net_stats->tx_carrier_errors;
5628
Michael Chancea94db2006-06-12 22:16:13 -07005629 net_stats->rx_missed_errors =
5630 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5631 stats_blk->stat_FwRxDrop);
5632
Michael Chanb6016b72005-05-26 13:03:09 -07005633 return net_stats;
5634}
5635
5636/* All ethtool functions called with rtnl_lock */
5637
5638static int
5639bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5640{
Michael Chan972ec0d2006-01-23 16:12:43 -08005641 struct bnx2 *bp = netdev_priv(dev);
Michael Chan7b6b8342007-07-07 22:50:15 -07005642 int support_serdes = 0, support_copper = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005643
5644 cmd->supported = SUPPORTED_Autoneg;
Michael Chan7b6b8342007-07-07 22:50:15 -07005645 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5646 support_serdes = 1;
5647 support_copper = 1;
5648 } else if (bp->phy_port == PORT_FIBRE)
5649 support_serdes = 1;
5650 else
5651 support_copper = 1;
5652
5653 if (support_serdes) {
Michael Chanb6016b72005-05-26 13:03:09 -07005654 cmd->supported |= SUPPORTED_1000baseT_Full |
5655 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005656 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5657 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005658
Michael Chanb6016b72005-05-26 13:03:09 -07005659 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005660 if (support_copper) {
Michael Chanb6016b72005-05-26 13:03:09 -07005661 cmd->supported |= SUPPORTED_10baseT_Half |
5662 SUPPORTED_10baseT_Full |
5663 SUPPORTED_100baseT_Half |
5664 SUPPORTED_100baseT_Full |
5665 SUPPORTED_1000baseT_Full |
5666 SUPPORTED_TP;
5667
Michael Chanb6016b72005-05-26 13:03:09 -07005668 }
5669
Michael Chan7b6b8342007-07-07 22:50:15 -07005670 spin_lock_bh(&bp->phy_lock);
5671 cmd->port = bp->phy_port;
Michael Chanb6016b72005-05-26 13:03:09 -07005672 cmd->advertising = bp->advertising;
5673
5674 if (bp->autoneg & AUTONEG_SPEED) {
5675 cmd->autoneg = AUTONEG_ENABLE;
5676 }
5677 else {
5678 cmd->autoneg = AUTONEG_DISABLE;
5679 }
5680
5681 if (netif_carrier_ok(dev)) {
5682 cmd->speed = bp->line_speed;
5683 cmd->duplex = bp->duplex;
5684 }
5685 else {
5686 cmd->speed = -1;
5687 cmd->duplex = -1;
5688 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005689 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005690
5691 cmd->transceiver = XCVR_INTERNAL;
5692 cmd->phy_address = bp->phy_addr;
5693
5694 return 0;
5695}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005696
Michael Chanb6016b72005-05-26 13:03:09 -07005697static int
5698bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5699{
Michael Chan972ec0d2006-01-23 16:12:43 -08005700 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005701 u8 autoneg = bp->autoneg;
5702 u8 req_duplex = bp->req_duplex;
5703 u16 req_line_speed = bp->req_line_speed;
5704 u32 advertising = bp->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005705 int err = -EINVAL;
5706
5707 spin_lock_bh(&bp->phy_lock);
5708
5709 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5710 goto err_out_unlock;
5711
5712 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5713 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005714
5715 if (cmd->autoneg == AUTONEG_ENABLE) {
5716 autoneg |= AUTONEG_SPEED;
5717
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005718 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005719
5720 /* allow advertising 1 speed */
5721 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5722 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5723 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5724 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5725
Michael Chan7b6b8342007-07-07 22:50:15 -07005726 if (cmd->port == PORT_FIBRE)
5727 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005728
5729 advertising = cmd->advertising;
5730
Michael Chan27a005b2007-05-03 13:23:41 -07005731 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
Michael Chan7b6b8342007-07-07 22:50:15 -07005732 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5733 (cmd->port == PORT_TP))
5734 goto err_out_unlock;
5735 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07005736 advertising = cmd->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005737 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5738 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005739 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005740 if (cmd->port == PORT_FIBRE)
Michael Chanb6016b72005-05-26 13:03:09 -07005741 advertising = ETHTOOL_ALL_FIBRE_SPEED;
Michael Chan7b6b8342007-07-07 22:50:15 -07005742 else
Michael Chanb6016b72005-05-26 13:03:09 -07005743 advertising = ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005744 }
5745 advertising |= ADVERTISED_Autoneg;
5746 }
5747 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005748 if (cmd->port == PORT_FIBRE) {
Michael Chan80be4432006-11-19 14:07:28 -08005749 if ((cmd->speed != SPEED_1000 &&
5750 cmd->speed != SPEED_2500) ||
5751 (cmd->duplex != DUPLEX_FULL))
Michael Chan7b6b8342007-07-07 22:50:15 -07005752 goto err_out_unlock;
Michael Chan80be4432006-11-19 14:07:28 -08005753
5754 if (cmd->speed == SPEED_2500 &&
5755 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
Michael Chan7b6b8342007-07-07 22:50:15 -07005756 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005757 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005758 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5759 goto err_out_unlock;
5760
Michael Chanb6016b72005-05-26 13:03:09 -07005761 autoneg &= ~AUTONEG_SPEED;
5762 req_line_speed = cmd->speed;
5763 req_duplex = cmd->duplex;
5764 advertising = 0;
5765 }
5766
5767 bp->autoneg = autoneg;
5768 bp->advertising = advertising;
5769 bp->req_line_speed = req_line_speed;
5770 bp->req_duplex = req_duplex;
5771
Michael Chan7b6b8342007-07-07 22:50:15 -07005772 err = bnx2_setup_phy(bp, cmd->port);
Michael Chanb6016b72005-05-26 13:03:09 -07005773
Michael Chan7b6b8342007-07-07 22:50:15 -07005774err_out_unlock:
Michael Chanc770a652005-08-25 15:38:39 -07005775 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005776
Michael Chan7b6b8342007-07-07 22:50:15 -07005777 return err;
Michael Chanb6016b72005-05-26 13:03:09 -07005778}
5779
5780static void
5781bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5782{
Michael Chan972ec0d2006-01-23 16:12:43 -08005783 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005784
5785 strcpy(info->driver, DRV_MODULE_NAME);
5786 strcpy(info->version, DRV_MODULE_VERSION);
5787 strcpy(info->bus_info, pci_name(bp->pdev));
Michael Chan58fc2ea2007-07-07 22:52:02 -07005788 strcpy(info->fw_version, bp->fw_version);
Michael Chanb6016b72005-05-26 13:03:09 -07005789}
5790
Michael Chan244ac4f2006-03-20 17:48:46 -08005791#define BNX2_REGDUMP_LEN (32 * 1024)
5792
5793static int
5794bnx2_get_regs_len(struct net_device *dev)
5795{
5796 return BNX2_REGDUMP_LEN;
5797}
5798
5799static void
5800bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5801{
5802 u32 *p = _p, i, offset;
5803 u8 *orig_p = _p;
5804 struct bnx2 *bp = netdev_priv(dev);
5805 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5806 0x0800, 0x0880, 0x0c00, 0x0c10,
5807 0x0c30, 0x0d08, 0x1000, 0x101c,
5808 0x1040, 0x1048, 0x1080, 0x10a4,
5809 0x1400, 0x1490, 0x1498, 0x14f0,
5810 0x1500, 0x155c, 0x1580, 0x15dc,
5811 0x1600, 0x1658, 0x1680, 0x16d8,
5812 0x1800, 0x1820, 0x1840, 0x1854,
5813 0x1880, 0x1894, 0x1900, 0x1984,
5814 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5815 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5816 0x2000, 0x2030, 0x23c0, 0x2400,
5817 0x2800, 0x2820, 0x2830, 0x2850,
5818 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5819 0x3c00, 0x3c94, 0x4000, 0x4010,
5820 0x4080, 0x4090, 0x43c0, 0x4458,
5821 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5822 0x4fc0, 0x5010, 0x53c0, 0x5444,
5823 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5824 0x5fc0, 0x6000, 0x6400, 0x6428,
5825 0x6800, 0x6848, 0x684c, 0x6860,
5826 0x6888, 0x6910, 0x8000 };
5827
5828 regs->version = 0;
5829
5830 memset(p, 0, BNX2_REGDUMP_LEN);
5831
5832 if (!netif_running(bp->dev))
5833 return;
5834
5835 i = 0;
5836 offset = reg_boundaries[0];
5837 p += offset;
5838 while (offset < BNX2_REGDUMP_LEN) {
5839 *p++ = REG_RD(bp, offset);
5840 offset += 4;
5841 if (offset == reg_boundaries[i + 1]) {
5842 offset = reg_boundaries[i + 2];
5843 p = (u32 *) (orig_p + offset);
5844 i += 2;
5845 }
5846 }
5847}
5848
Michael Chanb6016b72005-05-26 13:03:09 -07005849static void
5850bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5851{
Michael Chan972ec0d2006-01-23 16:12:43 -08005852 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005853
5854 if (bp->flags & NO_WOL_FLAG) {
5855 wol->supported = 0;
5856 wol->wolopts = 0;
5857 }
5858 else {
5859 wol->supported = WAKE_MAGIC;
5860 if (bp->wol)
5861 wol->wolopts = WAKE_MAGIC;
5862 else
5863 wol->wolopts = 0;
5864 }
5865 memset(&wol->sopass, 0, sizeof(wol->sopass));
5866}
5867
5868static int
5869bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5870{
Michael Chan972ec0d2006-01-23 16:12:43 -08005871 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005872
5873 if (wol->wolopts & ~WAKE_MAGIC)
5874 return -EINVAL;
5875
5876 if (wol->wolopts & WAKE_MAGIC) {
5877 if (bp->flags & NO_WOL_FLAG)
5878 return -EINVAL;
5879
5880 bp->wol = 1;
5881 }
5882 else {
5883 bp->wol = 0;
5884 }
5885 return 0;
5886}
5887
5888static int
5889bnx2_nway_reset(struct net_device *dev)
5890{
Michael Chan972ec0d2006-01-23 16:12:43 -08005891 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005892 u32 bmcr;
5893
5894 if (!(bp->autoneg & AUTONEG_SPEED)) {
5895 return -EINVAL;
5896 }
5897
Michael Chanc770a652005-08-25 15:38:39 -07005898 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005899
Michael Chan7b6b8342007-07-07 22:50:15 -07005900 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5901 int rc;
5902
5903 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5904 spin_unlock_bh(&bp->phy_lock);
5905 return rc;
5906 }
5907
Michael Chanb6016b72005-05-26 13:03:09 -07005908 /* Force a link-down event that is visible to the link partner */
5909 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005910 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005911 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005912
5913 msleep(20);
5914
Michael Chanc770a652005-08-25 15:38:39 -07005915 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005916
5917 bp->current_interval = SERDES_AN_TIMEOUT;
5918 bp->serdes_an_pending = 1;
5919 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005920 }
5921
Michael Chanca58c3a2007-05-03 13:22:52 -07005922 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005923 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005924 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005925
Michael Chanc770a652005-08-25 15:38:39 -07005926 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005927
5928 return 0;
5929}
5930
5931static int
5932bnx2_get_eeprom_len(struct net_device *dev)
5933{
Michael Chan972ec0d2006-01-23 16:12:43 -08005934 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005935
Michael Chan1122db72006-01-23 16:11:42 -08005936 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005937 return 0;
5938
Michael Chan1122db72006-01-23 16:11:42 -08005939 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005940}
5941
5942static int
5943bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5944 u8 *eebuf)
5945{
Michael Chan972ec0d2006-01-23 16:12:43 -08005946 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005947 int rc;
5948
John W. Linville1064e942005-11-10 12:58:24 -08005949 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005950
5951 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5952
5953 return rc;
5954}
5955
5956static int
5957bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5958 u8 *eebuf)
5959{
Michael Chan972ec0d2006-01-23 16:12:43 -08005960 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005961 int rc;
5962
John W. Linville1064e942005-11-10 12:58:24 -08005963 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005964
5965 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5966
5967 return rc;
5968}
5969
5970static int
5971bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5972{
Michael Chan972ec0d2006-01-23 16:12:43 -08005973 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005974
5975 memset(coal, 0, sizeof(struct ethtool_coalesce));
5976
5977 coal->rx_coalesce_usecs = bp->rx_ticks;
5978 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5979 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5980 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5981
5982 coal->tx_coalesce_usecs = bp->tx_ticks;
5983 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5984 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5985 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5986
5987 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5988
5989 return 0;
5990}
5991
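/* Apply new interrupt coalescing parameters, clamping them to the
 * hardware limits (10-bit tick values, 8-bit frame counts).  The 5708
 * only supports a statistics interval of 0 or one second.  The NIC is
 * re-initialized if it is running so the new values take effect.
 */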
5992static int
5993bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5994{
Michael Chan972ec0d2006-01-23 16:12:43 -08005995 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005996
5997 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5998 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5999
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006000 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07006001 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6002
6003 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6004 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6005
6006 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6007 if (bp->rx_quick_cons_trip_int > 0xff)
6008 bp->rx_quick_cons_trip_int = 0xff;
6009
6010 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6011 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6012
6013 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6014 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6015
6016 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6017 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6018
6019 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6020 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6021 0xff;
6022
6023 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07006024 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6025 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6026 bp->stats_ticks = USEC_PER_SEC;
6027 }
Michael Chan7ea69202007-07-16 18:27:10 -07006028 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6029 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6030 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006031
6032 if (netif_running(bp->dev)) {
6033 bnx2_netif_stop(bp);
6034 bnx2_init_nic(bp);
6035 bnx2_netif_start(bp);
6036 }
6037
6038 return 0;
6039}
6040
6041static void
6042bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6043{
Michael Chan972ec0d2006-01-23 16:12:43 -08006044 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006045
Michael Chan13daffa2006-03-20 17:49:20 -08006046 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006047 ering->rx_mini_max_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006048 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006049
6050 ering->rx_pending = bp->rx_ring_size;
6051 ering->rx_mini_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006052 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
Michael Chanb6016b72005-05-26 13:03:09 -07006053
6054 ering->tx_max_pending = MAX_TX_DESC_CNT;
6055 ering->tx_pending = bp->tx_ring_size;
6056}
6057
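/* Changing the ring sizes requires a full re-initialization: stop the
 * NIC, free all buffers and DMA memory, record the new sizes, then
 * reallocate and restart if the interface was running.
 */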
6058static int
Michael Chan5d5d0012007-12-12 11:17:43 -08006059bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
Michael Chanb6016b72005-05-26 13:03:09 -07006060{
Michael Chan13daffa2006-03-20 17:49:20 -08006061 if (netif_running(bp->dev)) {
6062 bnx2_netif_stop(bp);
6063 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6064 bnx2_free_skbs(bp);
6065 bnx2_free_mem(bp);
6066 }
6067
Michael Chan5d5d0012007-12-12 11:17:43 -08006068 bnx2_set_rx_ring_size(bp, rx);
6069 bp->tx_ring_size = tx;
Michael Chanb6016b72005-05-26 13:03:09 -07006070
6071 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08006072 int rc;
6073
6074 rc = bnx2_alloc_mem(bp);
6075 if (rc)
6076 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07006077 bnx2_init_nic(bp);
6078 bnx2_netif_start(bp);
6079 }
Michael Chanb6016b72005-05-26 13:03:09 -07006080 return 0;
6081}
6082
Michael Chan5d5d0012007-12-12 11:17:43 -08006083static int
6084bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6085{
6086 struct bnx2 *bp = netdev_priv(dev);
6087 int rc;
6088
6089 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6090 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6091 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6092
6093 return -EINVAL;
6094 }
6095 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6096 return rc;
6097}
6098
Michael Chanb6016b72005-05-26 13:03:09 -07006099static void
6100bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6101{
Michael Chan972ec0d2006-01-23 16:12:43 -08006102 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006103
6104 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6105 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6106 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6107}
6108
6109static int
6110bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6111{
Michael Chan972ec0d2006-01-23 16:12:43 -08006112 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006113
6114 bp->req_flow_ctrl = 0;
6115 if (epause->rx_pause)
6116 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6117 if (epause->tx_pause)
6118 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6119
6120 if (epause->autoneg) {
6121 bp->autoneg |= AUTONEG_FLOW_CTRL;
6122 }
6123 else {
6124 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6125 }
6126
Michael Chanc770a652005-08-25 15:38:39 -07006127 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006128
Michael Chan0d8a6572007-07-07 22:49:43 -07006129 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07006130
Michael Chanc770a652005-08-25 15:38:39 -07006131 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006132
6133 return 0;
6134}
6135
6136static u32
6137bnx2_get_rx_csum(struct net_device *dev)
6138{
Michael Chan972ec0d2006-01-23 16:12:43 -08006139 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006140
6141 return bp->rx_csum;
6142}
6143
6144static int
6145bnx2_set_rx_csum(struct net_device *dev, u32 data)
6146{
Michael Chan972ec0d2006-01-23 16:12:43 -08006147 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006148
6149 bp->rx_csum = data;
6150 return 0;
6151}
6152
Michael Chanb11d6212006-06-29 12:31:21 -07006153static int
6154bnx2_set_tso(struct net_device *dev, u32 data)
6155{
Michael Chan4666f872007-05-03 13:22:28 -07006156 struct bnx2 *bp = netdev_priv(dev);
6157
6158 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07006159 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006160 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6161 dev->features |= NETIF_F_TSO6;
6162 } else
6163 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6164 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07006165 return 0;
6166}
6167
Michael Chancea94db2006-06-12 22:16:13 -07006168#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07006169
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006170static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006171 char string[ETH_GSTRING_LEN];
6172} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6173 { "rx_bytes" },
6174 { "rx_error_bytes" },
6175 { "tx_bytes" },
6176 { "tx_error_bytes" },
6177 { "rx_ucast_packets" },
6178 { "rx_mcast_packets" },
6179 { "rx_bcast_packets" },
6180 { "tx_ucast_packets" },
6181 { "tx_mcast_packets" },
6182 { "tx_bcast_packets" },
6183 { "tx_mac_errors" },
6184 { "tx_carrier_errors" },
6185 { "rx_crc_errors" },
6186 { "rx_align_errors" },
6187 { "tx_single_collisions" },
6188 { "tx_multi_collisions" },
6189 { "tx_deferred" },
6190 { "tx_excess_collisions" },
6191 { "tx_late_collisions" },
6192 { "tx_total_collisions" },
6193 { "rx_fragments" },
6194 { "rx_jabbers" },
6195 { "rx_undersize_packets" },
6196 { "rx_oversize_packets" },
6197 { "rx_64_byte_packets" },
6198 { "rx_65_to_127_byte_packets" },
6199 { "rx_128_to_255_byte_packets" },
6200 { "rx_256_to_511_byte_packets" },
6201 { "rx_512_to_1023_byte_packets" },
6202 { "rx_1024_to_1522_byte_packets" },
6203 { "rx_1523_to_9022_byte_packets" },
6204 { "tx_64_byte_packets" },
6205 { "tx_65_to_127_byte_packets" },
6206 { "tx_128_to_255_byte_packets" },
6207 { "tx_256_to_511_byte_packets" },
6208 { "tx_512_to_1023_byte_packets" },
6209 { "tx_1024_to_1522_byte_packets" },
6210 { "tx_1523_to_9022_byte_packets" },
6211 { "rx_xon_frames" },
6212 { "rx_xoff_frames" },
6213 { "tx_xon_frames" },
6214 { "tx_xoff_frames" },
6215 { "rx_mac_ctrl_frames" },
6216 { "rx_filtered_packets" },
6217 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07006218 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07006219};
6220
6221#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6222
Arjan van de Venf71e1302006-03-03 21:33:57 -05006223static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006224 STATS_OFFSET32(stat_IfHCInOctets_hi),
6225 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6226 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6227 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6228 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6229 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6230 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6231 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6232 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6233 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6234 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006235 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6236 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6237 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6238 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6239 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6240 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6241 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6242 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6243 STATS_OFFSET32(stat_EtherStatsCollisions),
6244 STATS_OFFSET32(stat_EtherStatsFragments),
6245 STATS_OFFSET32(stat_EtherStatsJabbers),
6246 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6247 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6248 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6249 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6250 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6251 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6252 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6253 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6254 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6255 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6256 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6257 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6258 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6259 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6260 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6261 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6262 STATS_OFFSET32(stat_XonPauseFramesReceived),
6263 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6264 STATS_OFFSET32(stat_OutXonSent),
6265 STATS_OFFSET32(stat_OutXoffSent),
6266 STATS_OFFSET32(stat_MacControlFramesReceived),
6267 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6268 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07006269 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07006270};
6271
6272/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6273 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006274 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006275static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006276 8,0,8,8,8,8,8,8,8,8,
6277 4,0,4,4,4,4,4,4,4,4,
6278 4,4,4,4,4,4,4,4,4,4,
6279 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006280 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07006281};
6282
Michael Chan5b0c76a2005-11-04 08:45:49 -08006283static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6284 8,0,8,8,8,8,8,8,8,8,
6285 4,4,4,4,4,4,4,4,4,4,
6286 4,4,4,4,4,4,4,4,4,4,
6287 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006288 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08006289};
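
/* Added commentary, not upstream: the tables above are parallel arrays
 * indexed by the same statistic number.  bnx2_stats_str_arr[i] is the
 * ethtool name, bnx2_stats_offset_arr[i] is the 32-bit word offset into
 * the statistics block, and the per-chip *_stats_len_arr[i] gives the
 * counter width in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = counter
 * skipped on that chip because of the errata noted above).
 */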
6290
Michael Chanb6016b72005-05-26 13:03:09 -07006291#define BNX2_NUM_TESTS 6
6292
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006293static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006294 char string[ETH_GSTRING_LEN];
6295} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6296 { "register_test (offline)" },
6297 { "memory_test (offline)" },
6298 { "loopback_test (offline)" },
6299 { "nvram_test (online)" },
6300 { "interrupt_test (online)" },
6301 { "link_test (online)" },
6302};
6303
6304static int
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006305bnx2_get_sset_count(struct net_device *dev, int sset)
Michael Chanb6016b72005-05-26 13:03:09 -07006306{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006307 switch (sset) {
6308 case ETH_SS_TEST:
6309 return BNX2_NUM_TESTS;
6310 case ETH_SS_STATS:
6311 return BNX2_NUM_STATS;
6312 default:
6313 return -EOPNOTSUPP;
6314 }
Michael Chanb6016b72005-05-26 13:03:09 -07006315}
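
/* Added note, not upstream: these counts size the string and result
 * buffers that ethtool allocates for "ethtool -S <dev>" (ETH_SS_STATS)
 * and "ethtool -t <dev>" (ETH_SS_TEST).
 */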
6316
6317static void
6318bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6319{
Michael Chan972ec0d2006-01-23 16:12:43 -08006320 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006321
6322 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6323 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006324 int i;
6325
Michael Chanb6016b72005-05-26 13:03:09 -07006326 bnx2_netif_stop(bp);
6327 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6328 bnx2_free_skbs(bp);
6329
6330 if (bnx2_test_registers(bp) != 0) {
6331 buf[0] = 1;
6332 etest->flags |= ETH_TEST_FL_FAILED;
6333 }
6334 if (bnx2_test_memory(bp) != 0) {
6335 buf[1] = 1;
6336 etest->flags |= ETH_TEST_FL_FAILED;
6337 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006338 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006339 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006340
6341 if (!netif_running(bp->dev)) {
6342 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6343 }
6344 else {
6345 bnx2_init_nic(bp);
6346 bnx2_netif_start(bp);
6347 }
6348
6349 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006350 for (i = 0; i < 7; i++) {
6351 if (bp->link_up)
6352 break;
6353 msleep_interruptible(1000);
6354 }
Michael Chanb6016b72005-05-26 13:03:09 -07006355 }
6356
6357 if (bnx2_test_nvram(bp) != 0) {
6358 buf[3] = 1;
6359 etest->flags |= ETH_TEST_FL_FAILED;
6360 }
6361 if (bnx2_test_intr(bp) != 0) {
6362 buf[4] = 1;
6363 etest->flags |= ETH_TEST_FL_FAILED;
6364 }
6365
6366 if (bnx2_test_link(bp) != 0) {
6367 buf[5] = 1;
6368 etest->flags |= ETH_TEST_FL_FAILED;
6369
6370 }
6371}
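
/* Usage sketch, not upstream: "ethtool -t <dev> offline" sets
 * ETH_TEST_FL_OFFLINE and runs all six tests above (the NIC is stopped,
 * reset and reinitialized around the register/memory/loopback tests),
 * while an online run covers only the nvram, interrupt and link tests.
 * buf[0..5] line up with the entries in bnx2_tests_str_arr[].
 */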
6372
6373static void
6374bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6375{
6376 switch (stringset) {
6377 case ETH_SS_STATS:
6378 memcpy(buf, bnx2_stats_str_arr,
6379 sizeof(bnx2_stats_str_arr));
6380 break;
6381 case ETH_SS_TEST:
6382 memcpy(buf, bnx2_tests_str_arr,
6383 sizeof(bnx2_tests_str_arr));
6384 break;
6385 }
6386}
6387
Michael Chanb6016b72005-05-26 13:03:09 -07006388static void
6389bnx2_get_ethtool_stats(struct net_device *dev,
6390 struct ethtool_stats *stats, u64 *buf)
6391{
Michael Chan972ec0d2006-01-23 16:12:43 -08006392 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006393 int i;
6394 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006395 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006396
6397 if (hw_stats == NULL) {
6398 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6399 return;
6400 }
6401
Michael Chan5b0c76a2005-11-04 08:45:49 -08006402 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6403 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6404 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6405 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006406 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006407 else
6408 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006409
6410 for (i = 0; i < BNX2_NUM_STATS; i++) {
6411 if (stats_len_arr[i] == 0) {
6412 /* skip this counter */
6413 buf[i] = 0;
6414 continue;
6415 }
6416 if (stats_len_arr[i] == 4) {
6417 /* 4-byte counter */
6418 buf[i] = (u64)
6419 *(hw_stats + bnx2_stats_offset_arr[i]);
6420 continue;
6421 }
6422 /* 8-byte counter */
6423 buf[i] = (((u64) *(hw_stats +
6424 bnx2_stats_offset_arr[i])) << 32) +
6425 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6426 }
6427}
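
/* Illustrative sketch, not part of the upstream driver: how a single
 * 64-bit counter is assembled in the loop above.  The statistics block
 * keeps each wide counter as two 32-bit words, the high word at
 * bnx2_stats_offset_arr[i] and the low word immediately after it.  The
 * helper name below is hypothetical.
 */
static inline u64
bnx2_example_read_stat64(u32 *hw_stats, unsigned long offset)
{
	return (((u64) hw_stats[offset]) << 32) + hw_stats[offset + 1];
}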
6428
6429static int
6430bnx2_phys_id(struct net_device *dev, u32 data)
6431{
Michael Chan972ec0d2006-01-23 16:12:43 -08006432 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006433 int i;
6434 u32 save;
6435
6436 if (data == 0)
6437 data = 2;
6438
6439 save = REG_RD(bp, BNX2_MISC_CFG);
6440 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6441
6442 for (i = 0; i < (data * 2); i++) {
6443 if ((i % 2) == 0) {
6444 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6445 }
6446 else {
6447 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6448 BNX2_EMAC_LED_1000MB_OVERRIDE |
6449 BNX2_EMAC_LED_100MB_OVERRIDE |
6450 BNX2_EMAC_LED_10MB_OVERRIDE |
6451 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6452 BNX2_EMAC_LED_TRAFFIC);
6453 }
6454 msleep_interruptible(500);
6455 if (signal_pending(current))
6456 break;
6457 }
6458 REG_WR(bp, BNX2_EMAC_LED, 0);
6459 REG_WR(bp, BNX2_MISC_CFG, save);
6460 return 0;
6461}
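
/* Usage sketch, not upstream: "ethtool -p <dev> N" blinks the port LED
 * for roughly N seconds; each of the data * 2 iterations toggles the
 * LED override and sleeps 500 ms, and N == 0 defaults to 2 seconds.
 */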
6462
Michael Chan4666f872007-05-03 13:22:28 -07006463static int
6464bnx2_set_tx_csum(struct net_device *dev, u32 data)
6465{
6466 struct bnx2 *bp = netdev_priv(dev);
6467
6468 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chan6460d942007-07-14 19:07:52 -07006469 return (ethtool_op_set_tx_ipv6_csum(dev, data));
Michael Chan4666f872007-05-03 13:22:28 -07006470 else
6471 return (ethtool_op_set_tx_csum(dev, data));
6472}
6473
Jeff Garzik7282d492006-09-13 14:30:00 -04006474static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006475 .get_settings = bnx2_get_settings,
6476 .set_settings = bnx2_set_settings,
6477 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006478 .get_regs_len = bnx2_get_regs_len,
6479 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006480 .get_wol = bnx2_get_wol,
6481 .set_wol = bnx2_set_wol,
6482 .nway_reset = bnx2_nway_reset,
6483 .get_link = ethtool_op_get_link,
6484 .get_eeprom_len = bnx2_get_eeprom_len,
6485 .get_eeprom = bnx2_get_eeprom,
6486 .set_eeprom = bnx2_set_eeprom,
6487 .get_coalesce = bnx2_get_coalesce,
6488 .set_coalesce = bnx2_set_coalesce,
6489 .get_ringparam = bnx2_get_ringparam,
6490 .set_ringparam = bnx2_set_ringparam,
6491 .get_pauseparam = bnx2_get_pauseparam,
6492 .set_pauseparam = bnx2_set_pauseparam,
6493 .get_rx_csum = bnx2_get_rx_csum,
6494 .set_rx_csum = bnx2_set_rx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006495 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006496 .set_sg = ethtool_op_set_sg,
Michael Chanb11d6212006-06-29 12:31:21 -07006497 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006498 .self_test = bnx2_self_test,
6499 .get_strings = bnx2_get_strings,
6500 .phys_id = bnx2_phys_id,
Michael Chanb6016b72005-05-26 13:03:09 -07006501 .get_ethtool_stats = bnx2_get_ethtool_stats,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006502 .get_sset_count = bnx2_get_sset_count,
Michael Chanb6016b72005-05-26 13:03:09 -07006503};
6504
6505/* Called with rtnl_lock */
6506static int
6507bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6508{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006509 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006510 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006511 int err;
6512
6513 switch(cmd) {
6514 case SIOCGMIIPHY:
6515 data->phy_id = bp->phy_addr;
6516
6517 /* fallthru */
6518 case SIOCGMIIREG: {
6519 u32 mii_regval;
6520
Michael Chan7b6b8342007-07-07 22:50:15 -07006521 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6522 return -EOPNOTSUPP;
6523
Michael Chandad3e452007-05-03 13:18:03 -07006524 if (!netif_running(dev))
6525 return -EAGAIN;
6526
Michael Chanc770a652005-08-25 15:38:39 -07006527 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006528 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006529 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006530
6531 data->val_out = mii_regval;
6532
6533 return err;
6534 }
6535
6536 case SIOCSMIIREG:
6537 if (!capable(CAP_NET_ADMIN))
6538 return -EPERM;
6539
Michael Chan7b6b8342007-07-07 22:50:15 -07006540 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6541 return -EOPNOTSUPP;
6542
Michael Chandad3e452007-05-03 13:18:03 -07006543 if (!netif_running(dev))
6544 return -EAGAIN;
6545
Michael Chanc770a652005-08-25 15:38:39 -07006546 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006547 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006548 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006549
6550 return err;
6551
6552 default:
6553 /* do nothing */
6554 break;
6555 }
6556 return -EOPNOTSUPP;
6557}
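
/* Added note, not upstream: SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are the
 * ioctls used by tools such as mii-tool to read and write PHY registers.
 * They return -EOPNOTSUPP when the PHY is owned by the management
 * firmware (REMOTE_PHY_CAP_FLAG) and -EAGAIN while the interface is down.
 */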
6558
6559/* Called with rtnl_lock */
6560static int
6561bnx2_change_mac_addr(struct net_device *dev, void *p)
6562{
6563 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006564 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006565
Michael Chan73eef4c2005-08-25 15:39:15 -07006566 if (!is_valid_ether_addr(addr->sa_data))
6567 return -EINVAL;
6568
Michael Chanb6016b72005-05-26 13:03:09 -07006569 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6570 if (netif_running(dev))
6571 bnx2_set_mac_addr(bp);
6572
6573 return 0;
6574}
6575
6576/* Called with rtnl_lock */
6577static int
6578bnx2_change_mtu(struct net_device *dev, int new_mtu)
6579{
Michael Chan972ec0d2006-01-23 16:12:43 -08006580 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006581
6582 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6583 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6584 return -EINVAL;
6585
6586 dev->mtu = new_mtu;
Michael Chan5d5d0012007-12-12 11:17:43 -08006587 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
Michael Chanb6016b72005-05-26 13:03:09 -07006588}
6589
6590#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6591static void
6592poll_bnx2(struct net_device *dev)
6593{
Michael Chan972ec0d2006-01-23 16:12:43 -08006594 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006595
6596 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006597 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006598 enable_irq(bp->pdev->irq);
6599}
6600#endif
6601
Michael Chan253c8b72007-01-08 19:56:01 -08006602static void __devinit
6603bnx2_get_5709_media(struct bnx2 *bp)
6604{
6605 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6606 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6607 u32 strap;
6608
6609 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6610 return;
6611 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6612 bp->phy_flags |= PHY_SERDES_FLAG;
6613 return;
6614 }
6615
6616 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6617 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6618 else
6619 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6620
6621 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6622 switch (strap) {
6623 case 0x4:
6624 case 0x5:
6625 case 0x6:
6626 bp->phy_flags |= PHY_SERDES_FLAG;
6627 return;
6628 }
6629 } else {
6630 switch (strap) {
6631 case 0x1:
6632 case 0x2:
6633 case 0x4:
6634 bp->phy_flags |= PHY_SERDES_FLAG;
6635 return;
6636 }
6637 }
6638}
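
/* Added commentary, not upstream: on the dual-media 5709 the bond ID
 * identifies pure copper ("C") and pure SerDes ("S") parts outright;
 * otherwise the media is derived from a strap value, read from either
 * the PHY_CTRL or the PHY_CTRL_STRAP field depending on the override
 * bit, with the interpretation depending on the PCI function number.
 */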
6639
Michael Chan883e5152007-05-03 13:25:11 -07006640static void __devinit
6641bnx2_get_pci_speed(struct bnx2 *bp)
6642{
6643 u32 reg;
6644
6645 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6646 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6647 u32 clkreg;
6648
6649 bp->flags |= PCIX_FLAG;
6650
6651 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6652
6653 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6654 switch (clkreg) {
6655 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6656 bp->bus_speed_mhz = 133;
6657 break;
6658
6659 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6660 bp->bus_speed_mhz = 100;
6661 break;
6662
6663 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6664 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6665 bp->bus_speed_mhz = 66;
6666 break;
6667
6668 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6669 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6670 bp->bus_speed_mhz = 50;
6671 break;
6672
6673 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6674 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6675 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6676 bp->bus_speed_mhz = 33;
6677 break;
6678 }
6679 }
6680 else {
6681 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6682 bp->bus_speed_mhz = 66;
6683 else
6684 bp->bus_speed_mhz = 33;
6685 }
6686
6687 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6688 bp->flags |= PCI_32BIT_FLAG;
6689
6690}
6691
Michael Chanb6016b72005-05-26 13:03:09 -07006692static int __devinit
6693bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6694{
6695 struct bnx2 *bp;
6696 unsigned long mem_len;
Michael Chan58fc2ea2007-07-07 22:52:02 -07006697 int rc, i, j;
Michael Chanb6016b72005-05-26 13:03:09 -07006698 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006699 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006700
Michael Chanb6016b72005-05-26 13:03:09 -07006701 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006702 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006703
6704 bp->flags = 0;
6705 bp->phy_flags = 0;
6706
6707 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6708 rc = pci_enable_device(pdev);
6709 if (rc) {
Joe Perches898eb712007-10-18 03:06:30 -07006710 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006711 goto err_out;
6712 }
6713
6714 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006715 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006716 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006717 rc = -ENODEV;
6718 goto err_out_disable;
6719 }
6720
6721 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6722 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006723 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006724 goto err_out_disable;
6725 }
6726
6727 pci_set_master(pdev);
6728
6729 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6730 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006731 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006732 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006733 rc = -EIO;
6734 goto err_out_release;
6735 }
6736
Michael Chanb6016b72005-05-26 13:03:09 -07006737 bp->dev = dev;
6738 bp->pdev = pdev;
6739
6740 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006741 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006742 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006743
6744 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006745 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006746 dev->mem_end = dev->mem_start + mem_len;
6747 dev->irq = pdev->irq;
6748
6749 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6750
6751 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006752 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006753 rc = -ENOMEM;
6754 goto err_out_release;
6755 }
6756
6757 /* Configure byte swap and enable write to the reg_window registers.
6758 * Rely on CPU to do target byte swapping on big endian systems
6759 * The chip's target access swapping will not swap all accesses
6760 */
6761 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6762 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6763 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6764
Pavel Machek829ca9a2005-09-03 15:56:56 -07006765 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006766
6767 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6768
Michael Chan883e5152007-05-03 13:25:11 -07006769 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6770 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6771 dev_err(&pdev->dev,
6772 "Cannot find PCIE capability, aborting.\n");
6773 rc = -EIO;
6774 goto err_out_unmap;
6775 }
6776 bp->flags |= PCIE_FLAG;
6777 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006778 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6779 if (bp->pcix_cap == 0) {
6780 dev_err(&pdev->dev,
6781 "Cannot find PCIX capability, aborting.\n");
6782 rc = -EIO;
6783 goto err_out_unmap;
6784 }
6785 }
6786
Michael Chan8e6a72c2007-05-03 13:24:48 -07006787 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6788 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6789 bp->flags |= MSI_CAP_FLAG;
6790 }
6791
Michael Chan40453c82007-05-03 13:19:18 -07006792 /* 5708 cannot support DMA addresses > 40-bit. */
6793 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6794 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6795 else
6796 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6797
6798 /* Configure DMA attributes. */
6799 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6800 dev->features |= NETIF_F_HIGHDMA;
6801 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6802 if (rc) {
6803 dev_err(&pdev->dev,
6804 "pci_set_consistent_dma_mask failed, aborting.\n");
6805 goto err_out_unmap;
6806 }
6807 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6808 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6809 goto err_out_unmap;
6810 }
6811
Michael Chan883e5152007-05-03 13:25:11 -07006812 if (!(bp->flags & PCIE_FLAG))
6813 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006814
6815 /* 5706A0 may falsely detect SERR and PERR. */
6816 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6817 reg = REG_RD(bp, PCI_COMMAND);
6818 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6819 REG_WR(bp, PCI_COMMAND, reg);
6820 }
6821 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6822 !(bp->flags & PCIX_FLAG)) {
6823
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006824 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006825 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;	/* rc would otherwise still be 0 here */
Michael Chanb6016b72005-05-26 13:03:09 -07006826		goto err_out_unmap;
6827 }
6828
6829 bnx2_init_nvram(bp);
6830
Michael Chane3648b32005-11-04 08:51:21 -08006831 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6832
6833 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006834 BNX2_SHM_HDR_SIGNATURE_SIG) {
6835 u32 off = PCI_FUNC(pdev->devfn) << 2;
6836
6837 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6838 } else
Michael Chane3648b32005-11-04 08:51:21 -08006839 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6840
Michael Chanb6016b72005-05-26 13:03:09 -07006841 /* Get the permanent MAC address. First we need to make sure the
6842 * firmware is actually running.
6843 */
Michael Chane3648b32005-11-04 08:51:21 -08006844 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006845
6846 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6847 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006848 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006849 rc = -ENODEV;
6850 goto err_out_unmap;
6851 }
6852
Michael Chan58fc2ea2007-07-07 22:52:02 -07006853 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6854 for (i = 0, j = 0; i < 3; i++) {
6855 u8 num, k, skip0;
6856
6857 num = (u8) (reg >> (24 - (i * 8)));
6858 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6859 if (num >= k || !skip0 || k == 1) {
6860 bp->fw_version[j++] = (num / k) + '0';
6861 skip0 = 0;
6862 }
6863 }
6864 if (i != 2)
6865 bp->fw_version[j++] = '.';
6866 }
Michael Chan846f5c62007-10-10 16:16:51 -07006867 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6868 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6869 bp->wol = 1;
6870
6871 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
Michael Chanc2d3db82007-07-16 18:26:43 -07006872 bp->flags |= ASF_ENABLE_FLAG;
6873
6874 for (i = 0; i < 30; i++) {
6875 reg = REG_RD_IND(bp, bp->shmem_base +
6876 BNX2_BC_STATE_CONDITION);
6877 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6878 break;
6879 msleep(10);
6880 }
6881 }
Michael Chan58fc2ea2007-07-07 22:52:02 -07006882 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6883 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6884 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6885 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6886 int i;
6887 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6888
6889 bp->fw_version[j++] = ' ';
6890 for (i = 0; i < 3; i++) {
6891 reg = REG_RD_IND(bp, addr + i * 4);
6892 reg = swab32(reg);
6893 memcpy(&bp->fw_version[j], &reg, 4);
6894 j += 4;
6895 }
6896 }
Michael Chanb6016b72005-05-26 13:03:09 -07006897
Michael Chane3648b32005-11-04 08:51:21 -08006898 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006899 bp->mac_addr[0] = (u8) (reg >> 8);
6900 bp->mac_addr[1] = (u8) reg;
6901
Michael Chane3648b32005-11-04 08:51:21 -08006902 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006903 bp->mac_addr[2] = (u8) (reg >> 24);
6904 bp->mac_addr[3] = (u8) (reg >> 16);
6905 bp->mac_addr[4] = (u8) (reg >> 8);
6906 bp->mac_addr[5] = (u8) reg;
6907
Michael Chan5d5d0012007-12-12 11:17:43 -08006908 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6909
Michael Chanb6016b72005-05-26 13:03:09 -07006910 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006911 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006912
6913 bp->rx_csum = 1;
6914
Michael Chanb6016b72005-05-26 13:03:09 -07006915 bp->tx_quick_cons_trip_int = 20;
6916 bp->tx_quick_cons_trip = 20;
6917 bp->tx_ticks_int = 80;
6918 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006919
Michael Chanb6016b72005-05-26 13:03:09 -07006920 bp->rx_quick_cons_trip_int = 6;
6921 bp->rx_quick_cons_trip = 6;
6922 bp->rx_ticks_int = 18;
6923 bp->rx_ticks = 18;
6924
Michael Chan7ea69202007-07-16 18:27:10 -07006925 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006926
6927 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006928 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006929
Michael Chan5b0c76a2005-11-04 08:45:49 -08006930 bp->phy_addr = 1;
6931
Michael Chanb6016b72005-05-26 13:03:09 -07006932 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006933 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6934 bnx2_get_5709_media(bp);
6935 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006936 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006937
Michael Chan0d8a6572007-07-07 22:49:43 -07006938 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006939 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006940 bp->phy_port = PORT_FIBRE;
Michael Chan846f5c62007-10-10 16:16:51 -07006941 reg = REG_RD_IND(bp, bp->shmem_base +
6942 BNX2_SHARED_HW_CFG_CONFIG);
6943 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6944 bp->flags |= NO_WOL_FLAG;
6945 bp->wol = 0;
6946 }
Michael Chanbac0dff2006-11-19 14:15:05 -08006947 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006948 bp->phy_addr = 2;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006949 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6950 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6951 }
Michael Chan0d8a6572007-07-07 22:49:43 -07006952 bnx2_init_remote_phy(bp);
6953
Michael Chan261dd5c2007-01-08 19:55:46 -08006954 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6955 CHIP_NUM(bp) == CHIP_NUM_5708)
6956 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanfb0c18b2007-12-10 17:18:23 -08006957 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6958 (CHIP_REV(bp) == CHIP_REV_Ax ||
6959 CHIP_REV(bp) == CHIP_REV_Bx))
Michael Chanb659f442007-02-02 00:46:35 -08006960 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006961
Michael Chan16088272006-06-12 22:16:43 -07006962 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6963 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
Michael Chan846f5c62007-10-10 16:16:51 -07006964 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chandda1e392006-01-23 16:08:14 -08006965 bp->flags |= NO_WOL_FLAG;
Michael Chan846f5c62007-10-10 16:16:51 -07006966 bp->wol = 0;
6967 }
Michael Chandda1e392006-01-23 16:08:14 -08006968
Michael Chanb6016b72005-05-26 13:03:09 -07006969 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6970 bp->tx_quick_cons_trip_int =
6971 bp->tx_quick_cons_trip;
6972 bp->tx_ticks_int = bp->tx_ticks;
6973 bp->rx_quick_cons_trip_int =
6974 bp->rx_quick_cons_trip;
6975 bp->rx_ticks_int = bp->rx_ticks;
6976 bp->comp_prod_trip_int = bp->comp_prod_trip;
6977 bp->com_ticks_int = bp->com_ticks;
6978 bp->cmd_ticks_int = bp->cmd_ticks;
6979 }
6980
Michael Chanf9317a42006-09-29 17:06:23 -07006981 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6982 *
6983 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6984 * with byte enables disabled on the unused 32-bit word. This is legal
6985 * but causes problems on the AMD 8132 which will eventually stop
6986 * responding after a while.
6987 *
6988 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006989 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006990 */
6991 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6992 struct pci_dev *amd_8132 = NULL;
6993
6994 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6995 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6996 amd_8132))) {
Michael Chanf9317a42006-09-29 17:06:23 -07006997
Auke Kok44c10132007-06-08 15:46:36 -07006998 if (amd_8132->revision >= 0x10 &&
6999 amd_8132->revision <= 0x13) {
Michael Chanf9317a42006-09-29 17:06:23 -07007000 disable_msi = 1;
7001 pci_dev_put(amd_8132);
7002 break;
7003 }
7004 }
7005 }
7006
Michael Chandeaf3912007-07-07 22:48:00 -07007007 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07007008 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7009
Michael Chancd339a02005-08-25 15:35:24 -07007010 init_timer(&bp->timer);
7011 bp->timer.expires = RUN_AT(bp->timer_interval);
7012 bp->timer.data = (unsigned long) bp;
7013 bp->timer.function = bnx2_timer;
7014
Michael Chanb6016b72005-05-26 13:03:09 -07007015 return 0;
7016
7017err_out_unmap:
7018 if (bp->regview) {
7019 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07007020 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07007021 }
7022
7023err_out_release:
7024 pci_release_regions(pdev);
7025
7026err_out_disable:
7027 pci_disable_device(pdev);
7028 pci_set_drvdata(pdev, NULL);
7029
7030err_out:
7031 return rc;
7032}
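
/* Illustrative sketch, not part of the upstream driver: the bootcode
 * version loop in bnx2_init_board() above turns each of the three high
 * bytes of BNX2_DEV_INFO_BC_REV into decimal digits with leading zeros
 * suppressed and joins them with dots, so a register value of 0x01030900
 * becomes "1.3.9".  A stand-alone equivalent for one byte, with a
 * hypothetical name:
 */
static inline int
bnx2_example_format_bc_byte(u8 num, char *buf)
{
	u8 k, skip0 = 1;
	int j = 0;

	for (k = 100; k >= 1; num %= k, k /= 10) {
		if (num >= k || !skip0 || k == 1) {
			buf[j++] = (num / k) + '0';
			skip0 = 0;
		}
	}
	return j;	/* number of characters written, no terminator added */
}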
7033
Michael Chan883e5152007-05-03 13:25:11 -07007034static char * __devinit
7035bnx2_bus_string(struct bnx2 *bp, char *str)
7036{
7037 char *s = str;
7038
7039 if (bp->flags & PCIE_FLAG) {
7040 s += sprintf(s, "PCI Express");
7041 } else {
7042 s += sprintf(s, "PCI");
7043 if (bp->flags & PCIX_FLAG)
7044 s += sprintf(s, "-X");
7045 if (bp->flags & PCI_32BIT_FLAG)
7046 s += sprintf(s, " 32-bit");
7047 else
7048 s += sprintf(s, " 64-bit");
7049 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7050 }
7051 return str;
7052}
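
/* Added note, not upstream: bnx2_bus_string() yields strings such as
 * "PCI Express" or "PCI-X 64-bit 133MHz" for the probe banner printed
 * in bnx2_init_one(); the 40-byte buffer passed in easily covers the
 * longest case.
 */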
7053
Michael Chanb6016b72005-05-26 13:03:09 -07007054static int __devinit
7055bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7056{
7057 static int version_printed = 0;
7058 struct net_device *dev = NULL;
7059 struct bnx2 *bp;
Joe Perches0795af52007-10-03 17:59:30 -07007060 int rc;
Michael Chan883e5152007-05-03 13:25:11 -07007061 char str[40];
Joe Perches0795af52007-10-03 17:59:30 -07007062 DECLARE_MAC_BUF(mac);
Michael Chanb6016b72005-05-26 13:03:09 -07007063
7064 if (version_printed++ == 0)
7065 printk(KERN_INFO "%s", version);
7066
7067 /* dev zeroed in init_etherdev */
7068 dev = alloc_etherdev(sizeof(*bp));
7069
7070 if (!dev)
7071 return -ENOMEM;
7072
7073 rc = bnx2_init_board(pdev, dev);
7074 if (rc < 0) {
7075 free_netdev(dev);
7076 return rc;
7077 }
7078
7079 dev->open = bnx2_open;
7080 dev->hard_start_xmit = bnx2_start_xmit;
7081 dev->stop = bnx2_close;
7082 dev->get_stats = bnx2_get_stats;
7083 dev->set_multicast_list = bnx2_set_rx_mode;
7084 dev->do_ioctl = bnx2_ioctl;
7085 dev->set_mac_address = bnx2_change_mac_addr;
7086 dev->change_mtu = bnx2_change_mtu;
7087 dev->tx_timeout = bnx2_tx_timeout;
7088 dev->watchdog_timeo = TX_TIMEOUT;
7089#ifdef BCM_VLAN
7090 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07007091#endif
Michael Chanb6016b72005-05-26 13:03:09 -07007092 dev->ethtool_ops = &bnx2_ethtool_ops;
Michael Chanb6016b72005-05-26 13:03:09 -07007093
Michael Chan972ec0d2006-01-23 16:12:43 -08007094 bp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007095 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
Michael Chanb6016b72005-05-26 13:03:09 -07007096
7097#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7098 dev->poll_controller = poll_bnx2;
7099#endif
7100
Michael Chan1b2f9222007-05-03 13:20:19 -07007101 pci_set_drvdata(pdev, dev);
7102
7103 memcpy(dev->dev_addr, bp->mac_addr, 6);
7104 memcpy(dev->perm_addr, bp->mac_addr, 6);
7105 bp->name = board_info[ent->driver_data].name;
7106
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007107 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07007108 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007109 dev->features |= NETIF_F_IPV6_CSUM;
7110
Michael Chan1b2f9222007-05-03 13:20:19 -07007111#ifdef BCM_VLAN
7112 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7113#endif
7114 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07007115 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7116 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07007117
Michael Chanb6016b72005-05-26 13:03:09 -07007118 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04007119 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07007120 if (bp->regview)
7121 iounmap(bp->regview);
7122 pci_release_regions(pdev);
7123 pci_disable_device(pdev);
7124 pci_set_drvdata(pdev, NULL);
7125 free_netdev(dev);
7126 return rc;
7127 }
7128
Michael Chan883e5152007-05-03 13:25:11 -07007129 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Joe Perches0795af52007-10-03 17:59:30 -07007130 "IRQ %d, node addr %s\n",
Michael Chanb6016b72005-05-26 13:03:09 -07007131 dev->name,
7132 bp->name,
7133 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7134 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07007135 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07007136 dev->base_addr,
Joe Perches0795af52007-10-03 17:59:30 -07007137 bp->pdev->irq, print_mac(mac, dev->dev_addr));
Michael Chanb6016b72005-05-26 13:03:09 -07007138
Michael Chanb6016b72005-05-26 13:03:09 -07007139 return 0;
7140}
7141
7142static void __devexit
7143bnx2_remove_one(struct pci_dev *pdev)
7144{
7145 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007146 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007147
Michael Chanafdc08b2005-08-25 15:34:29 -07007148 flush_scheduled_work();
7149
Michael Chanb6016b72005-05-26 13:03:09 -07007150 unregister_netdev(dev);
7151
7152 if (bp->regview)
7153 iounmap(bp->regview);
7154
7155 free_netdev(dev);
7156 pci_release_regions(pdev);
7157 pci_disable_device(pdev);
7158 pci_set_drvdata(pdev, NULL);
7159}
7160
7161static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07007162bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07007163{
7164 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007165 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007166 u32 reset_code;
7167
Michael Chan6caebb02007-08-03 20:57:25 -07007168 /* PCI register 4 needs to be saved whether netif_running() or not.
7169 * MSI address and data need to be saved if using MSI and
7170 * netif_running().
7171 */
7172 pci_save_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007173 if (!netif_running(dev))
7174 return 0;
7175
Michael Chan1d60290f2006-03-20 17:50:08 -08007176 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07007177 bnx2_netif_stop(bp);
7178 netif_device_detach(dev);
7179 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08007180 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07007181 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08007182 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07007183 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7184 else
7185 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7186 bnx2_reset_chip(bp, reset_code);
7187 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07007188 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07007189 return 0;
7190}
7191
7192static int
7193bnx2_resume(struct pci_dev *pdev)
7194{
7195 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007196 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007197
Michael Chan6caebb02007-08-03 20:57:25 -07007198 pci_restore_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007199 if (!netif_running(dev))
7200 return 0;
7201
Pavel Machek829ca9a2005-09-03 15:56:56 -07007202 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07007203 netif_device_attach(dev);
7204 bnx2_init_nic(bp);
7205 bnx2_netif_start(bp);
7206 return 0;
7207}
7208
7209static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07007210 .name = DRV_MODULE_NAME,
7211 .id_table = bnx2_pci_tbl,
7212 .probe = bnx2_init_one,
7213 .remove = __devexit_p(bnx2_remove_one),
7214 .suspend = bnx2_suspend,
7215 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07007216};
7217
7218static int __init bnx2_init(void)
7219{
Jeff Garzik29917622006-08-19 17:48:59 -04007220 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07007221}
7222
7223static void __exit bnx2_cleanup(void)
7224{
7225 pci_unregister_driver(&bnx2_pci_driver);
7226}
7227
7228module_init(bnx2_init);
7229module_exit(bnx2_cleanup);
7230
7231
7232