Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan72fbaeb2007-05-03 13:25:32 -07003 * Copyright (c) 2004-2007 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
Jiri Slaby1977f032007-10-18 23:40:25 -070029#include <linux/bitops.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080030#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
Linus Torvaldsde081fa2007-07-12 16:40:08 -070043#include <net/tcp.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080044#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
Michael Chan110d0ef2007-12-12 11:18:34 -080055#define FW_BUF_SIZE 0x10000
Denys Vlasenkob3448b02007-09-30 17:55:51 -070056
Michael Chanb6016b72005-05-26 13:03:09 -070057#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
Michael Chana0d142c2007-12-12 11:20:22 -080059#define DRV_MODULE_VERSION "1.7.0"
60#define DRV_MODULE_RELDATE "December 11, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070061
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
Randy Dunlape19360f2006-04-10 23:22:06 -070067static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070068 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080071MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070072MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080086 BCM5708,
87 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080088 BCM5709,
Michael Chan27a005b2007-05-03 13:23:41 -070089 BCM5709S,
Michael Chanb6016b72005-05-26 13:03:09 -070090} board_t;
91
92/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050093static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070094 char *name;
95} board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chan27a005b2007-05-03 13:23:41 -0700104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
Michael Chanb6016b72005-05-26 13:03:09 -0700105 };
106
107static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chan27a005b2007-05-03 13:23:41 -0700124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
Michael Chanb6016b72005-05-26 13:03:09 -0700126 { 0, }
127};
128
129static struct flash_spec flash_table[] =
130{
Michael Chane30372c2007-07-16 18:26:23 -0700131#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
Michael Chanb6016b72005-05-26 13:03:09 -0700133 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chane30372c2007-07-16 18:26:23 -0700135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
Michael Chanb6016b72005-05-26 13:03:09 -0700136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chanb6016b72005-05-26 13:03:09 -0700147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chanb6016b72005-05-26 13:03:09 -0700153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 164 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 169 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chane30372c2007-07-16 18:26:23 -0700178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
Michael Chane30372c2007-07-16 18:26:23 -0700193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chane30372c2007-07-16 18:26:23 -0700203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
 206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
Michael Chane30372c2007-07-16 18:26:23 -0700208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
Michael Chane30372c2007-07-16 18:26:23 -0700213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700216};
217
Michael Chane30372c2007-07-16 18:26:23 -0700218static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225};
226
Michael Chanb6016b72005-05-26 13:03:09 -0700227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
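/* Return the number of free TX descriptors.  smp_mb() forces
 * tx_prod and tx_cons to be re-read so the count reflects the
 * latest producer/consumer updates.
 */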
Michael Chane89bbf12005-08-25 15:36:58 -0700229static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230{
Michael Chan2f8af122006-08-15 01:39:10 -0700231 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700232
Michael Chan2f8af122006-08-15 01:39:10 -0700233 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
Michael Chane89bbf12005-08-25 15:36:58 -0700244 return (bp->tx_ring_size - diff);
245}
246
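/* Indirect register read: write the target offset to the PCICFG
 * window address register, then read the data through the window.
 * indirect_lock serializes the two-step access.
 */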
Michael Chanb6016b72005-05-26 13:03:09 -0700247static u32
248bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249{
Michael Chan1b8227c2007-05-03 13:24:05 -0700250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
Michael Chan1b8227c2007-05-03 13:24:05 -0700254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
Michael Chanb6016b72005-05-26 13:03:09 -0700257}
258
259static void
260bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261{
Michael Chan1b8227c2007-05-03 13:24:05 -0700262 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
Michael Chan1b8227c2007-05-03 13:24:05 -0700265 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700266}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
Michael Chan1b8227c2007-05-03 13:24:05 -0700272 spin_lock_bh(&bp->indirect_lock);
Michael Chan59b47d82006-11-19 14:10:45 -0800273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
Michael Chan1b8227c2007-05-03 13:24:05 -0700290 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700291}
292
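/* Read a PHY register over MDIO.  Hardware auto-polling is
 * temporarily turned off around the access, and the COMM register
 * is polled until START_BUSY clears (returns -EBUSY on timeout).
 */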
293static int
294bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295{
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348}
349
350static int
351bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352{
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400370
Michael Chanb6016b72005-05-26 13:03:09 -0700371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397}
398
399static void
400bnx2_disable_int(struct bnx2 *bp)
401{
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
405}
406
407static void
408bnx2_enable_int(struct bnx2 *bp)
409{
Michael Chanb6016b72005-05-26 13:03:09 -0700410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -0800411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
413
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chanb6016b72005-05-26 13:03:09 -0700415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
416
Michael Chanbf5295b2006-03-23 01:11:56 -0800417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -0700418}
419
420static void
421bnx2_disable_int_sync(struct bnx2 *bp)
422{
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
426}
427
428static void
429bnx2_netif_stop(struct bnx2 *bp)
430{
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700433 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -0700434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
436 }
437}
438
439static void
440bnx2_netif_start(struct bnx2 *bp)
441{
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700445 napi_enable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -0700446 bnx2_enable_int(bp);
447 }
448 }
449}
450
451static void
452bnx2_free_mem(struct bnx2 *bp)
453{
Michael Chan13daffa2006-03-20 17:49:20 -0800454 int i;
455
Michael Chan59b47d82006-11-19 14:10:45 -0800456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459 bp->ctx_blk[i],
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
462 }
463 }
Michael Chanb6016b72005-05-26 13:03:09 -0700464 if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800465 pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800468 bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700469 }
470 if (bp->tx_desc_ring) {
Michael Chane343d552007-12-12 11:16:19 -0800471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
Michael Chanb6016b72005-05-26 13:03:09 -0700472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
474 }
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
Michael Chane343d552007-12-12 11:16:19 -0800479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
Michael Chan13daffa2006-03-20 17:49:20 -0800480 bp->rx_desc_ring[i],
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700483 }
Michael Chan13daffa2006-03-20 17:49:20 -0800484 vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400485 bp->rx_buf_ring = NULL;
Michael Chan47bf4242007-12-12 11:19:12 -0800486 for (i = 0; i < bp->rx_max_pg_ring; i++) {
487 if (bp->rx_pg_desc_ring[i])
488 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489 bp->rx_pg_desc_ring[i],
490 bp->rx_pg_desc_mapping[i]);
491 bp->rx_pg_desc_ring[i] = NULL;
492 }
493 if (bp->rx_pg_ring)
494 vfree(bp->rx_pg_ring);
495 bp->rx_pg_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700496}
497
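/* Allocate the TX/RX descriptor rings, the optional RX page rings,
 * the combined status + statistics block, and (on the 5709) the
 * context memory pages.  Any failure unwinds through bnx2_free_mem().
 */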
498static int
499bnx2_alloc_mem(struct bnx2 *bp)
500{
Michael Chan0f31f992006-03-23 01:12:38 -0800501 int i, status_blk_size;
Michael Chan13daffa2006-03-20 17:49:20 -0800502
Michael Chane343d552007-12-12 11:16:19 -0800503 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
Michael Chanb6016b72005-05-26 13:03:09 -0700504 if (bp->tx_buf_ring == NULL)
505 return -ENOMEM;
506
Michael Chane343d552007-12-12 11:16:19 -0800507 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
Michael Chanb6016b72005-05-26 13:03:09 -0700508 &bp->tx_desc_mapping);
509 if (bp->tx_desc_ring == NULL)
510 goto alloc_mem_err;
511
Michael Chane343d552007-12-12 11:16:19 -0800512 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
Michael Chanb6016b72005-05-26 13:03:09 -0700513 if (bp->rx_buf_ring == NULL)
514 goto alloc_mem_err;
515
Michael Chane343d552007-12-12 11:16:19 -0800516 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
Michael Chan13daffa2006-03-20 17:49:20 -0800517
518 for (i = 0; i < bp->rx_max_ring; i++) {
519 bp->rx_desc_ring[i] =
Michael Chane343d552007-12-12 11:16:19 -0800520 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
Michael Chan13daffa2006-03-20 17:49:20 -0800521 &bp->rx_desc_mapping[i]);
522 if (bp->rx_desc_ring[i] == NULL)
523 goto alloc_mem_err;
524
525 }
Michael Chanb6016b72005-05-26 13:03:09 -0700526
Michael Chan47bf4242007-12-12 11:19:12 -0800527 if (bp->rx_pg_ring_size) {
528 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
529 bp->rx_max_pg_ring);
530 if (bp->rx_pg_ring == NULL)
531 goto alloc_mem_err;
532
533 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
534 bp->rx_max_pg_ring);
535 }
536
537 for (i = 0; i < bp->rx_max_pg_ring; i++) {
538 bp->rx_pg_desc_ring[i] =
539 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
540 &bp->rx_pg_desc_mapping[i]);
541 if (bp->rx_pg_desc_ring[i] == NULL)
542 goto alloc_mem_err;
543
544 }
545
Michael Chan0f31f992006-03-23 01:12:38 -0800546 /* Combine status and statistics blocks into one allocation. */
547 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
548 bp->status_stats_size = status_blk_size +
549 sizeof(struct statistics_block);
550
551 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700552 &bp->status_blk_mapping);
553 if (bp->status_blk == NULL)
554 goto alloc_mem_err;
555
Michael Chan0f31f992006-03-23 01:12:38 -0800556 memset(bp->status_blk, 0, bp->status_stats_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700557
Michael Chan0f31f992006-03-23 01:12:38 -0800558 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
559 status_blk_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700560
Michael Chan0f31f992006-03-23 01:12:38 -0800561 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
Michael Chanb6016b72005-05-26 13:03:09 -0700562
Michael Chan59b47d82006-11-19 14:10:45 -0800563 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
564 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
565 if (bp->ctx_pages == 0)
566 bp->ctx_pages = 1;
567 for (i = 0; i < bp->ctx_pages; i++) {
568 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
569 BCM_PAGE_SIZE,
570 &bp->ctx_blk_mapping[i]);
571 if (bp->ctx_blk[i] == NULL)
572 goto alloc_mem_err;
573 }
574 }
Michael Chanb6016b72005-05-26 13:03:09 -0700575 return 0;
576
577alloc_mem_err:
578 bnx2_free_mem(bp);
579 return -ENOMEM;
580}
581
582static void
Michael Chane3648b32005-11-04 08:51:21 -0800583bnx2_report_fw_link(struct bnx2 *bp)
584{
585 u32 fw_link_status = 0;
586
Michael Chan0d8a6572007-07-07 22:49:43 -0700587 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
588 return;
589
Michael Chane3648b32005-11-04 08:51:21 -0800590 if (bp->link_up) {
591 u32 bmsr;
592
593 switch (bp->line_speed) {
594 case SPEED_10:
595 if (bp->duplex == DUPLEX_HALF)
596 fw_link_status = BNX2_LINK_STATUS_10HALF;
597 else
598 fw_link_status = BNX2_LINK_STATUS_10FULL;
599 break;
600 case SPEED_100:
601 if (bp->duplex == DUPLEX_HALF)
602 fw_link_status = BNX2_LINK_STATUS_100HALF;
603 else
604 fw_link_status = BNX2_LINK_STATUS_100FULL;
605 break;
606 case SPEED_1000:
607 if (bp->duplex == DUPLEX_HALF)
608 fw_link_status = BNX2_LINK_STATUS_1000HALF;
609 else
610 fw_link_status = BNX2_LINK_STATUS_1000FULL;
611 break;
612 case SPEED_2500:
613 if (bp->duplex == DUPLEX_HALF)
614 fw_link_status = BNX2_LINK_STATUS_2500HALF;
615 else
616 fw_link_status = BNX2_LINK_STATUS_2500FULL;
617 break;
618 }
619
620 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
621
622 if (bp->autoneg) {
623 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
624
Michael Chanca58c3a2007-05-03 13:22:52 -0700625 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
626 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chane3648b32005-11-04 08:51:21 -0800627
628 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
629 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
630 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
631 else
632 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
633 }
634 }
635 else
636 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
637
638 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
639}
640
Michael Chan9b1084b2007-07-07 22:50:37 -0700641static char *
642bnx2_xceiver_str(struct bnx2 *bp)
643{
644 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
645 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
646 "Copper"));
647}
648
Michael Chane3648b32005-11-04 08:51:21 -0800649static void
Michael Chanb6016b72005-05-26 13:03:09 -0700650bnx2_report_link(struct bnx2 *bp)
651{
652 if (bp->link_up) {
653 netif_carrier_on(bp->dev);
Michael Chan9b1084b2007-07-07 22:50:37 -0700654 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
655 bnx2_xceiver_str(bp));
Michael Chanb6016b72005-05-26 13:03:09 -0700656
657 printk("%d Mbps ", bp->line_speed);
658
659 if (bp->duplex == DUPLEX_FULL)
660 printk("full duplex");
661 else
662 printk("half duplex");
663
664 if (bp->flow_ctrl) {
665 if (bp->flow_ctrl & FLOW_CTRL_RX) {
666 printk(", receive ");
667 if (bp->flow_ctrl & FLOW_CTRL_TX)
668 printk("& transmit ");
669 }
670 else {
671 printk(", transmit ");
672 }
673 printk("flow control ON");
674 }
675 printk("\n");
676 }
677 else {
678 netif_carrier_off(bp->dev);
Michael Chan9b1084b2007-07-07 22:50:37 -0700679 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
680 bnx2_xceiver_str(bp));
Michael Chanb6016b72005-05-26 13:03:09 -0700681 }
Michael Chane3648b32005-11-04 08:51:21 -0800682
683 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700684}
685
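/* Resolve the effective TX/RX pause settings from the local and
 * remote advertisements (see the 802.3 table referenced below), or
 * fall back to req_flow_ctrl when pause autonegotiation is disabled.
 */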
686static void
687bnx2_resolve_flow_ctrl(struct bnx2 *bp)
688{
689 u32 local_adv, remote_adv;
690
691 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400692 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700693 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
694
695 if (bp->duplex == DUPLEX_FULL) {
696 bp->flow_ctrl = bp->req_flow_ctrl;
697 }
698 return;
699 }
700
701 if (bp->duplex != DUPLEX_FULL) {
702 return;
703 }
704
Michael Chan5b0c76a2005-11-04 08:45:49 -0800705 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
706 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
707 u32 val;
708
709 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
710 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
711 bp->flow_ctrl |= FLOW_CTRL_TX;
712 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
713 bp->flow_ctrl |= FLOW_CTRL_RX;
714 return;
715 }
716
Michael Chanca58c3a2007-05-03 13:22:52 -0700717 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
718 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700719
720 if (bp->phy_flags & PHY_SERDES_FLAG) {
721 u32 new_local_adv = 0;
722 u32 new_remote_adv = 0;
723
724 if (local_adv & ADVERTISE_1000XPAUSE)
725 new_local_adv |= ADVERTISE_PAUSE_CAP;
726 if (local_adv & ADVERTISE_1000XPSE_ASYM)
727 new_local_adv |= ADVERTISE_PAUSE_ASYM;
728 if (remote_adv & ADVERTISE_1000XPAUSE)
729 new_remote_adv |= ADVERTISE_PAUSE_CAP;
730 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
731 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
732
733 local_adv = new_local_adv;
734 remote_adv = new_remote_adv;
735 }
736
737 /* See Table 28B-3 of 802.3ab-1999 spec. */
738 if (local_adv & ADVERTISE_PAUSE_CAP) {
739 if(local_adv & ADVERTISE_PAUSE_ASYM) {
740 if (remote_adv & ADVERTISE_PAUSE_CAP) {
741 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
742 }
743 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
744 bp->flow_ctrl = FLOW_CTRL_RX;
745 }
746 }
747 else {
748 if (remote_adv & ADVERTISE_PAUSE_CAP) {
749 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
750 }
751 }
752 }
753 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
754 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
755 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
756
757 bp->flow_ctrl = FLOW_CTRL_TX;
758 }
759 }
760}
761
762static int
Michael Chan27a005b2007-05-03 13:23:41 -0700763bnx2_5709s_linkup(struct bnx2 *bp)
764{
765 u32 val, speed;
766
767 bp->link_up = 1;
768
769 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
770 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
771 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
772
773 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
774 bp->line_speed = bp->req_line_speed;
775 bp->duplex = bp->req_duplex;
776 return 0;
777 }
778 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
779 switch (speed) {
780 case MII_BNX2_GP_TOP_AN_SPEED_10:
781 bp->line_speed = SPEED_10;
782 break;
783 case MII_BNX2_GP_TOP_AN_SPEED_100:
784 bp->line_speed = SPEED_100;
785 break;
786 case MII_BNX2_GP_TOP_AN_SPEED_1G:
787 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
788 bp->line_speed = SPEED_1000;
789 break;
790 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
791 bp->line_speed = SPEED_2500;
792 break;
793 }
794 if (val & MII_BNX2_GP_TOP_AN_FD)
795 bp->duplex = DUPLEX_FULL;
796 else
797 bp->duplex = DUPLEX_HALF;
798 return 0;
799}
800
801static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800802bnx2_5708s_linkup(struct bnx2 *bp)
803{
804 u32 val;
805
806 bp->link_up = 1;
807 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
808 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
809 case BCM5708S_1000X_STAT1_SPEED_10:
810 bp->line_speed = SPEED_10;
811 break;
812 case BCM5708S_1000X_STAT1_SPEED_100:
813 bp->line_speed = SPEED_100;
814 break;
815 case BCM5708S_1000X_STAT1_SPEED_1G:
816 bp->line_speed = SPEED_1000;
817 break;
818 case BCM5708S_1000X_STAT1_SPEED_2G5:
819 bp->line_speed = SPEED_2500;
820 break;
821 }
822 if (val & BCM5708S_1000X_STAT1_FD)
823 bp->duplex = DUPLEX_FULL;
824 else
825 bp->duplex = DUPLEX_HALF;
826
827 return 0;
828}
829
830static int
831bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700832{
833 u32 bmcr, local_adv, remote_adv, common;
834
835 bp->link_up = 1;
836 bp->line_speed = SPEED_1000;
837
Michael Chanca58c3a2007-05-03 13:22:52 -0700838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700839 if (bmcr & BMCR_FULLDPLX) {
840 bp->duplex = DUPLEX_FULL;
841 }
842 else {
843 bp->duplex = DUPLEX_HALF;
844 }
845
846 if (!(bmcr & BMCR_ANENABLE)) {
847 return 0;
848 }
849
Michael Chanca58c3a2007-05-03 13:22:52 -0700850 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
851 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700852
853 common = local_adv & remote_adv;
854 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
855
856 if (common & ADVERTISE_1000XFULL) {
857 bp->duplex = DUPLEX_FULL;
858 }
859 else {
860 bp->duplex = DUPLEX_HALF;
861 }
862 }
863
864 return 0;
865}
866
867static int
868bnx2_copper_linkup(struct bnx2 *bp)
869{
870 u32 bmcr;
871
Michael Chanca58c3a2007-05-03 13:22:52 -0700872 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700873 if (bmcr & BMCR_ANENABLE) {
874 u32 local_adv, remote_adv, common;
875
876 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
877 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
878
879 common = local_adv & (remote_adv >> 2);
880 if (common & ADVERTISE_1000FULL) {
881 bp->line_speed = SPEED_1000;
882 bp->duplex = DUPLEX_FULL;
883 }
884 else if (common & ADVERTISE_1000HALF) {
885 bp->line_speed = SPEED_1000;
886 bp->duplex = DUPLEX_HALF;
887 }
888 else {
Michael Chanca58c3a2007-05-03 13:22:52 -0700889 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
890 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700891
892 common = local_adv & remote_adv;
893 if (common & ADVERTISE_100FULL) {
894 bp->line_speed = SPEED_100;
895 bp->duplex = DUPLEX_FULL;
896 }
897 else if (common & ADVERTISE_100HALF) {
898 bp->line_speed = SPEED_100;
899 bp->duplex = DUPLEX_HALF;
900 }
901 else if (common & ADVERTISE_10FULL) {
902 bp->line_speed = SPEED_10;
903 bp->duplex = DUPLEX_FULL;
904 }
905 else if (common & ADVERTISE_10HALF) {
906 bp->line_speed = SPEED_10;
907 bp->duplex = DUPLEX_HALF;
908 }
909 else {
910 bp->line_speed = 0;
911 bp->link_up = 0;
912 }
913 }
914 }
915 else {
916 if (bmcr & BMCR_SPEED100) {
917 bp->line_speed = SPEED_100;
918 }
919 else {
920 bp->line_speed = SPEED_10;
921 }
922 if (bmcr & BMCR_FULLDPLX) {
923 bp->duplex = DUPLEX_FULL;
924 }
925 else {
926 bp->duplex = DUPLEX_HALF;
927 }
928 }
929
930 return 0;
931}
932
933static int
934bnx2_set_mac_link(struct bnx2 *bp)
935{
936 u32 val;
937
938 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
939 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
940 (bp->duplex == DUPLEX_HALF)) {
941 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
942 }
943
944 /* Configure the EMAC mode register. */
945 val = REG_RD(bp, BNX2_EMAC_MODE);
946
947 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800948 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -0800949 BNX2_EMAC_MODE_25G_MODE);
Michael Chanb6016b72005-05-26 13:03:09 -0700950
951 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800952 switch (bp->line_speed) {
953 case SPEED_10:
Michael Chan59b47d82006-11-19 14:10:45 -0800954 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
955 val |= BNX2_EMAC_MODE_PORT_MII_10M;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800956 break;
957 }
958 /* fall through */
959 case SPEED_100:
960 val |= BNX2_EMAC_MODE_PORT_MII;
961 break;
962 case SPEED_2500:
Michael Chan59b47d82006-11-19 14:10:45 -0800963 val |= BNX2_EMAC_MODE_25G_MODE;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800964 /* fall through */
965 case SPEED_1000:
966 val |= BNX2_EMAC_MODE_PORT_GMII;
967 break;
968 }
Michael Chanb6016b72005-05-26 13:03:09 -0700969 }
970 else {
971 val |= BNX2_EMAC_MODE_PORT_GMII;
972 }
973
974 /* Set the MAC to operate in the appropriate duplex mode. */
975 if (bp->duplex == DUPLEX_HALF)
976 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
977 REG_WR(bp, BNX2_EMAC_MODE, val);
978
979 /* Enable/disable rx PAUSE. */
980 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
981
982 if (bp->flow_ctrl & FLOW_CTRL_RX)
983 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
984 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
985
986 /* Enable/disable tx PAUSE. */
987 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
988 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
989
990 if (bp->flow_ctrl & FLOW_CTRL_TX)
991 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
992 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
993
994 /* Acknowledge the interrupt. */
995 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
996
997 return 0;
998}
999
Michael Chan27a005b2007-05-03 13:23:41 -07001000static void
1001bnx2_enable_bmsr1(struct bnx2 *bp)
1002{
1003 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004 (CHIP_NUM(bp) == CHIP_NUM_5709))
1005 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006 MII_BNX2_BLK_ADDR_GP_STATUS);
1007}
1008
1009static void
1010bnx2_disable_bmsr1(struct bnx2 *bp)
1011{
1012 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013 (CHIP_NUM(bp) == CHIP_NUM_5709))
1014 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1016}
1017
Michael Chanb6016b72005-05-26 13:03:09 -07001018static int
Michael Chan605a9e22007-05-03 13:23:13 -07001019bnx2_test_and_enable_2g5(struct bnx2 *bp)
1020{
1021 u32 up1;
1022 int ret = 1;
1023
1024 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025 return 0;
1026
1027 if (bp->autoneg & AUTONEG_SPEED)
1028 bp->advertising |= ADVERTISED_2500baseX_Full;
1029
Michael Chan27a005b2007-05-03 13:23:41 -07001030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1031 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1032
Michael Chan605a9e22007-05-03 13:23:13 -07001033 bnx2_read_phy(bp, bp->mii_up1, &up1);
1034 if (!(up1 & BCM5708S_UP1_2G5)) {
1035 up1 |= BCM5708S_UP1_2G5;
1036 bnx2_write_phy(bp, bp->mii_up1, up1);
1037 ret = 0;
1038 }
1039
Michael Chan27a005b2007-05-03 13:23:41 -07001040 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1041 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1042 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1043
Michael Chan605a9e22007-05-03 13:23:13 -07001044 return ret;
1045}
1046
1047static int
1048bnx2_test_and_disable_2g5(struct bnx2 *bp)
1049{
1050 u32 up1;
1051 int ret = 0;
1052
1053 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1054 return 0;
1055
Michael Chan27a005b2007-05-03 13:23:41 -07001056 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1058
Michael Chan605a9e22007-05-03 13:23:13 -07001059 bnx2_read_phy(bp, bp->mii_up1, &up1);
1060 if (up1 & BCM5708S_UP1_2G5) {
1061 up1 &= ~BCM5708S_UP1_2G5;
1062 bnx2_write_phy(bp, bp->mii_up1, up1);
1063 ret = 1;
1064 }
1065
Michael Chan27a005b2007-05-03 13:23:41 -07001066 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1067 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069
Michael Chan605a9e22007-05-03 13:23:13 -07001070 return ret;
1071}
1072
1073static void
1074bnx2_enable_forced_2g5(struct bnx2 *bp)
1075{
1076 u32 bmcr;
1077
1078 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079 return;
1080
Michael Chan27a005b2007-05-03 13:23:41 -07001081 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082 u32 val;
1083
1084 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085 MII_BNX2_BLK_ADDR_SERDES_DIG);
1086 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr |= BCM5708S_BMCR_FORCE_2500;
1098 }
1099
1100 if (bp->autoneg & AUTONEG_SPEED) {
1101 bmcr &= ~BMCR_ANENABLE;
1102 if (bp->req_duplex == DUPLEX_FULL)
1103 bmcr |= BMCR_FULLDPLX;
1104 }
1105 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106}
1107
1108static void
1109bnx2_disable_forced_2g5(struct bnx2 *bp)
1110{
1111 u32 bmcr;
1112
1113 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1114 return;
1115
Michael Chan27a005b2007-05-03 13:23:41 -07001116 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1117 u32 val;
1118
1119 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120 MII_BNX2_BLK_ADDR_SERDES_DIG);
1121 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1124
1125 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1128
1129 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001130 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1132 }
1133
1134 if (bp->autoneg & AUTONEG_SPEED)
1135 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1137}
1138
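/* Re-evaluate the link by reading BMSR (or the EMAC status for the
 * 5706 SerDes), update speed/duplex/flow control, and reprogram the
 * MAC.  Skipped when a remote (firmware-managed) PHY is in use.
 */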
1139static int
Michael Chanb6016b72005-05-26 13:03:09 -07001140bnx2_set_link(struct bnx2 *bp)
1141{
1142 u32 bmsr;
1143 u8 link_up;
1144
Michael Chan80be4432006-11-19 14:07:28 -08001145 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -07001146 bp->link_up = 1;
1147 return 0;
1148 }
1149
Michael Chan0d8a6572007-07-07 22:49:43 -07001150 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1151 return 0;
1152
Michael Chanb6016b72005-05-26 13:03:09 -07001153 link_up = bp->link_up;
1154
Michael Chan27a005b2007-05-03 13:23:41 -07001155 bnx2_enable_bmsr1(bp);
1156 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1157 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1158 bnx2_disable_bmsr1(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001159
1160 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1161 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1162 u32 val;
1163
1164 val = REG_RD(bp, BNX2_EMAC_STATUS);
1165 if (val & BNX2_EMAC_STATUS_LINK)
1166 bmsr |= BMSR_LSTATUS;
1167 else
1168 bmsr &= ~BMSR_LSTATUS;
1169 }
1170
1171 if (bmsr & BMSR_LSTATUS) {
1172 bp->link_up = 1;
1173
1174 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001175 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1176 bnx2_5706s_linkup(bp);
1177 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1178 bnx2_5708s_linkup(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001179 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1180 bnx2_5709s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001181 }
1182 else {
1183 bnx2_copper_linkup(bp);
1184 }
1185 bnx2_resolve_flow_ctrl(bp);
1186 }
1187 else {
1188 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
Michael Chan605a9e22007-05-03 13:23:13 -07001189 (bp->autoneg & AUTONEG_SPEED))
1190 bnx2_disable_forced_2g5(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001191
Michael Chanb6016b72005-05-26 13:03:09 -07001192 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1193 bp->link_up = 0;
1194 }
1195
1196 if (bp->link_up != link_up) {
1197 bnx2_report_link(bp);
1198 }
1199
1200 bnx2_set_mac_link(bp);
1201
1202 return 0;
1203}
1204
1205static int
1206bnx2_reset_phy(struct bnx2 *bp)
1207{
1208 int i;
1209 u32 reg;
1210
Michael Chanca58c3a2007-05-03 13:22:52 -07001211 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -07001212
1213#define PHY_RESET_MAX_WAIT 100
1214 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1215 udelay(10);
1216
Michael Chanca58c3a2007-05-03 13:22:52 -07001217 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001218 if (!(reg & BMCR_RESET)) {
1219 udelay(20);
1220 break;
1221 }
1222 }
1223 if (i == PHY_RESET_MAX_WAIT) {
1224 return -EBUSY;
1225 }
1226 return 0;
1227}
1228
1229static u32
1230bnx2_phy_get_pause_adv(struct bnx2 *bp)
1231{
1232 u32 adv = 0;
1233
1234 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1236
1237 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238 adv = ADVERTISE_1000XPAUSE;
1239 }
1240 else {
1241 adv = ADVERTISE_PAUSE_CAP;
1242 }
1243 }
1244 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246 adv = ADVERTISE_1000XPSE_ASYM;
1247 }
1248 else {
1249 adv = ADVERTISE_PAUSE_ASYM;
1250 }
1251 }
1252 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1255 }
1256 else {
1257 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1258 }
1259 }
1260 return adv;
1261}
1262
Michael Chan0d8a6572007-07-07 22:49:43 -07001263static int bnx2_fw_sync(struct bnx2 *, u32, int);
1264
Michael Chanb6016b72005-05-26 13:03:09 -07001265static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001266bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1267{
1268 u32 speed_arg = 0, pause_adv;
1269
1270 pause_adv = bnx2_phy_get_pause_adv(bp);
1271
1272 if (bp->autoneg & AUTONEG_SPEED) {
1273 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274 if (bp->advertising & ADVERTISED_10baseT_Half)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276 if (bp->advertising & ADVERTISED_10baseT_Full)
1277 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278 if (bp->advertising & ADVERTISED_100baseT_Half)
1279 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280 if (bp->advertising & ADVERTISED_100baseT_Full)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1286 } else {
1287 if (bp->req_line_speed == SPEED_2500)
1288 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289 else if (bp->req_line_speed == SPEED_1000)
1290 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291 else if (bp->req_line_speed == SPEED_100) {
1292 if (bp->req_duplex == DUPLEX_FULL)
1293 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1294 else
1295 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 } else if (bp->req_line_speed == SPEED_10) {
1297 if (bp->req_duplex == DUPLEX_FULL)
1298 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1299 else
1300 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1301 }
1302 }
1303
1304 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
 1306 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1307 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1308
1309 if (port == PORT_TP)
1310 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1312
1313 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1314
1315 spin_unlock_bh(&bp->phy_lock);
1316 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317 spin_lock_bh(&bp->phy_lock);
1318
1319 return 0;
1320}
1321
1322static int
1323bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001324{
Michael Chan605a9e22007-05-03 13:23:13 -07001325 u32 adv, bmcr;
Michael Chanb6016b72005-05-26 13:03:09 -07001326 u32 new_adv = 0;
1327
Michael Chan0d8a6572007-07-07 22:49:43 -07001328 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1329 return (bnx2_setup_remote_phy(bp, port));
1330
Michael Chanb6016b72005-05-26 13:03:09 -07001331 if (!(bp->autoneg & AUTONEG_SPEED)) {
1332 u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001333 int force_link_down = 0;
1334
Michael Chan605a9e22007-05-03 13:23:13 -07001335 if (bp->req_line_speed == SPEED_2500) {
1336 if (!bnx2_test_and_enable_2g5(bp))
1337 force_link_down = 1;
1338 } else if (bp->req_line_speed == SPEED_1000) {
1339 if (bnx2_test_and_disable_2g5(bp))
1340 force_link_down = 1;
1341 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001342 bnx2_read_phy(bp, bp->mii_adv, &adv);
Michael Chan80be4432006-11-19 14:07:28 -08001343 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1344
Michael Chanca58c3a2007-05-03 13:22:52 -07001345 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001346 new_bmcr = bmcr & ~BMCR_ANENABLE;
Michael Chan80be4432006-11-19 14:07:28 -08001347 new_bmcr |= BMCR_SPEED1000;
Michael Chan605a9e22007-05-03 13:23:13 -07001348
Michael Chan27a005b2007-05-03 13:23:41 -07001349 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1350 if (bp->req_line_speed == SPEED_2500)
1351 bnx2_enable_forced_2g5(bp);
1352 else if (bp->req_line_speed == SPEED_1000) {
1353 bnx2_disable_forced_2g5(bp);
1354 new_bmcr &= ~0x2000;
1355 }
1356
1357 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001358 if (bp->req_line_speed == SPEED_2500)
1359 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1360 else
1361 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001362 }
1363
Michael Chanb6016b72005-05-26 13:03:09 -07001364 if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001365 adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001366 new_bmcr |= BMCR_FULLDPLX;
1367 }
1368 else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001369 adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001370 new_bmcr &= ~BMCR_FULLDPLX;
1371 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001372 if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001373 /* Force a link down visible on the other side */
1374 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001375 bnx2_write_phy(bp, bp->mii_adv, adv &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001376 ~(ADVERTISE_1000XFULL |
1377 ADVERTISE_1000XHALF));
Michael Chanca58c3a2007-05-03 13:22:52 -07001378 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
Michael Chanb6016b72005-05-26 13:03:09 -07001379 BMCR_ANRESTART | BMCR_ANENABLE);
1380
1381 bp->link_up = 0;
1382 netif_carrier_off(bp->dev);
Michael Chanca58c3a2007-05-03 13:22:52 -07001383 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan80be4432006-11-19 14:07:28 -08001384 bnx2_report_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001385 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001386 bnx2_write_phy(bp, bp->mii_adv, adv);
1387 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001388 } else {
1389 bnx2_resolve_flow_ctrl(bp);
1390 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001391 }
1392 return 0;
1393 }
1394
Michael Chan605a9e22007-05-03 13:23:13 -07001395 bnx2_test_and_enable_2g5(bp);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001396
Michael Chanb6016b72005-05-26 13:03:09 -07001397 if (bp->advertising & ADVERTISED_1000baseT_Full)
1398 new_adv |= ADVERTISE_1000XFULL;
1399
1400 new_adv |= bnx2_phy_get_pause_adv(bp);
1401
Michael Chanca58c3a2007-05-03 13:22:52 -07001402 bnx2_read_phy(bp, bp->mii_adv, &adv);
1403 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001404
1405 bp->serdes_an_pending = 0;
1406 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1407 /* Force a link down visible on the other side */
1408 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001409 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chan80be4432006-11-19 14:07:28 -08001410 spin_unlock_bh(&bp->phy_lock);
1411 msleep(20);
1412 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001413 }
1414
Michael Chanca58c3a2007-05-03 13:22:52 -07001415 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1416 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001417 BMCR_ANENABLE);
Michael Chanf8dd0642006-11-19 14:08:29 -08001418 /* Speed up link-up time when the link partner
 1419 * does not autonegotiate, which is very common
 1420 * in blade servers. Some blade servers use
 1421 * IPMI for keyboard input and it's important
1422 * to minimize link disruptions. Autoneg. involves
1423 * exchanging base pages plus 3 next pages and
1424 * normally completes in about 120 msec.
1425 */
1426 bp->current_interval = SERDES_AN_TIMEOUT;
1427 bp->serdes_an_pending = 1;
1428 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan605a9e22007-05-03 13:23:13 -07001429 } else {
1430 bnx2_resolve_flow_ctrl(bp);
1431 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001432 }
1433
1434 return 0;
1435}
1436
1437#define ETHTOOL_ALL_FIBRE_SPEED \
Michael Chandeaf3912007-07-07 22:48:00 -07001438 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1439 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1440 (ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07001441
1442#define ETHTOOL_ALL_COPPER_SPEED \
1443 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1444 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1445 ADVERTISED_1000baseT_Full)
1446
1447#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1448 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001449
Michael Chanb6016b72005-05-26 13:03:09 -07001450#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
Michael Chandeaf3912007-07-07 22:48:00 -07001452static void
Michael Chan0d8a6572007-07-07 22:49:43 -07001453bnx2_set_default_remote_link(struct bnx2 *bp)
1454{
1455 u32 link;
1456
1457 if (bp->phy_port == PORT_TP)
1458 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1459 else
1460 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1461
1462 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1463 bp->req_line_speed = 0;
1464 bp->autoneg |= AUTONEG_SPEED;
1465 bp->advertising = ADVERTISED_Autoneg;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1467 bp->advertising |= ADVERTISED_10baseT_Half;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1469 bp->advertising |= ADVERTISED_10baseT_Full;
1470 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1471 bp->advertising |= ADVERTISED_100baseT_Half;
1472 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1473 bp->advertising |= ADVERTISED_100baseT_Full;
1474 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1475 bp->advertising |= ADVERTISED_1000baseT_Full;
1476 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1477 bp->advertising |= ADVERTISED_2500baseX_Full;
1478 } else {
1479 bp->autoneg = 0;
1480 bp->advertising = 0;
1481 bp->req_duplex = DUPLEX_FULL;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1483 bp->req_line_speed = SPEED_10;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1485 bp->req_duplex = DUPLEX_HALF;
1486 }
1487 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1488 bp->req_line_speed = SPEED_100;
1489 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1490 bp->req_duplex = DUPLEX_HALF;
1491 }
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1493 bp->req_line_speed = SPEED_1000;
1494 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1495 bp->req_line_speed = SPEED_2500;
1496 }
1497}
1498
1499static void
Michael Chandeaf3912007-07-07 22:48:00 -07001500bnx2_set_default_link(struct bnx2 *bp)
1501{
Michael Chan0d8a6572007-07-07 22:49:43 -07001502 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503 return bnx2_set_default_remote_link(bp);
1504
Michael Chandeaf3912007-07-07 22:48:00 -07001505 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506 bp->req_line_speed = 0;
1507 if (bp->phy_flags & PHY_SERDES_FLAG) {
1508 u32 reg;
1509
1510 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1511
1512 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1515 bp->autoneg = 0;
1516 bp->req_line_speed = bp->line_speed = SPEED_1000;
1517 bp->req_duplex = DUPLEX_FULL;
1518 }
1519 } else
1520 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1521}
1522
Michael Chan0d8a6572007-07-07 22:49:43 -07001523static void
Michael Chandf149d72007-07-07 22:51:36 -07001524bnx2_send_heart_beat(struct bnx2 *bp)
1525{
1526 u32 msg;
1527 u32 addr;
1528
1529 spin_lock(&bp->indirect_lock);
1530 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534 spin_unlock(&bp->indirect_lock);
1535}
1536
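/* Handle a link event reported by the firmware for a remote PHY.
 * The speed switch below relies on intentional fall-through from the
 * half-duplex to the full-duplex cases to set the line speed.
 */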
1537static void
Michael Chan0d8a6572007-07-07 22:49:43 -07001538bnx2_remote_phy_event(struct bnx2 *bp)
1539{
1540 u32 msg;
1541 u8 link_up = bp->link_up;
1542 u8 old_port;
1543
1544 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1545
Michael Chandf149d72007-07-07 22:51:36 -07001546 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1547 bnx2_send_heart_beat(bp);
1548
1549 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1550
Michael Chan0d8a6572007-07-07 22:49:43 -07001551 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1552 bp->link_up = 0;
1553 else {
1554 u32 speed;
1555
1556 bp->link_up = 1;
1557 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1558 bp->duplex = DUPLEX_FULL;
1559 switch (speed) {
1560 case BNX2_LINK_STATUS_10HALF:
1561 bp->duplex = DUPLEX_HALF;
1562 case BNX2_LINK_STATUS_10FULL:
1563 bp->line_speed = SPEED_10;
1564 break;
1565 case BNX2_LINK_STATUS_100HALF:
 1566 bp->duplex = DUPLEX_HALF; /* fall through */
1567 case BNX2_LINK_STATUS_100BASE_T4:
1568 case BNX2_LINK_STATUS_100FULL:
1569 bp->line_speed = SPEED_100;
1570 break;
1571 case BNX2_LINK_STATUS_1000HALF:
 1572 bp->duplex = DUPLEX_HALF; /* fall through */
1573 case BNX2_LINK_STATUS_1000FULL:
1574 bp->line_speed = SPEED_1000;
1575 break;
1576 case BNX2_LINK_STATUS_2500HALF:
 1577 bp->duplex = DUPLEX_HALF; /* fall through */
1578 case BNX2_LINK_STATUS_2500FULL:
1579 bp->line_speed = SPEED_2500;
1580 break;
1581 default:
1582 bp->line_speed = 0;
1583 break;
1584 }
1585
1586 spin_lock(&bp->phy_lock);
1587 bp->flow_ctrl = 0;
1588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1590 if (bp->duplex == DUPLEX_FULL)
1591 bp->flow_ctrl = bp->req_flow_ctrl;
1592 } else {
1593 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1594 bp->flow_ctrl |= FLOW_CTRL_TX;
1595 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1596 bp->flow_ctrl |= FLOW_CTRL_RX;
1597 }
1598
1599 old_port = bp->phy_port;
1600 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1601 bp->phy_port = PORT_FIBRE;
1602 else
1603 bp->phy_port = PORT_TP;
1604
1605 if (old_port != bp->phy_port)
1606 bnx2_set_default_link(bp);
1607
1608 spin_unlock(&bp->phy_lock);
1609 }
1610 if (bp->link_up != link_up)
1611 bnx2_report_link(bp);
1612
1613 bnx2_set_mac_link(bp);
1614}
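/*
 * Remote-PHY note (an interpretation of the handler above, not a spec
 * quote): with REMOTE_PHY_CAP_FLAG set, the PHY is owned by the
 * management firmware, which reports link state through the
 * BNX2_LINK_STATUS shared-memory word. This handler mirrors that word
 * into bp->line_speed/duplex/flow_ctrl and answers the firmware's
 * heart-beat request so the firmware knows the driver is still alive.
 */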
1615
1616static int
1617bnx2_set_remote_link(struct bnx2 *bp)
1618{
1619 u32 evt_code;
1620
1621 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622 switch (evt_code) {
1623 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624 bnx2_remote_phy_event(bp);
1625 break;
1626 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627 default:
Michael Chandf149d72007-07-07 22:51:36 -07001628 bnx2_send_heart_beat(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07001629 break;
1630 }
1631 return 0;
1632}
1633
Michael Chanb6016b72005-05-26 13:03:09 -07001634static int
1635bnx2_setup_copper_phy(struct bnx2 *bp)
1636{
1637 u32 bmcr;
1638 u32 new_bmcr;
1639
Michael Chanca58c3a2007-05-03 13:22:52 -07001640 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001641
1642 if (bp->autoneg & AUTONEG_SPEED) {
1643 u32 adv_reg, adv1000_reg;
1644 u32 new_adv_reg = 0;
1645 u32 new_adv1000_reg = 0;
1646
Michael Chanca58c3a2007-05-03 13:22:52 -07001647 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001648 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1649 ADVERTISE_PAUSE_ASYM);
1650
1651 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1652 adv1000_reg &= PHY_ALL_1000_SPEED;
1653
1654 if (bp->advertising & ADVERTISED_10baseT_Half)
1655 new_adv_reg |= ADVERTISE_10HALF;
1656 if (bp->advertising & ADVERTISED_10baseT_Full)
1657 new_adv_reg |= ADVERTISE_10FULL;
1658 if (bp->advertising & ADVERTISED_100baseT_Half)
1659 new_adv_reg |= ADVERTISE_100HALF;
1660 if (bp->advertising & ADVERTISED_100baseT_Full)
1661 new_adv_reg |= ADVERTISE_100FULL;
1662 if (bp->advertising & ADVERTISED_1000baseT_Full)
1663 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001664
Michael Chanb6016b72005-05-26 13:03:09 -07001665 new_adv_reg |= ADVERTISE_CSMA;
1666
1667 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1668
1669 if ((adv1000_reg != new_adv1000_reg) ||
1670 (adv_reg != new_adv_reg) ||
1671 ((bmcr & BMCR_ANENABLE) == 0)) {
1672
Michael Chanca58c3a2007-05-03 13:22:52 -07001673 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001674 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001675 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001676 BMCR_ANENABLE);
1677 }
1678 else if (bp->link_up) {
1679 /* Flow ctrl may have changed from auto to forced */
1680 /* or vice-versa. */
1681
1682 bnx2_resolve_flow_ctrl(bp);
1683 bnx2_set_mac_link(bp);
1684 }
1685 return 0;
1686 }
1687
1688 new_bmcr = 0;
1689 if (bp->req_line_speed == SPEED_100) {
1690 new_bmcr |= BMCR_SPEED100;
1691 }
1692 if (bp->req_duplex == DUPLEX_FULL) {
1693 new_bmcr |= BMCR_FULLDPLX;
1694 }
1695 if (new_bmcr != bmcr) {
1696 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001697
Michael Chanca58c3a2007-05-03 13:22:52 -07001698 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 1699 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); /* read twice: BMSR link status is latched */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001700
Michael Chanb6016b72005-05-26 13:03:09 -07001701 if (bmsr & BMSR_LSTATUS) {
1702 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001703 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001704 spin_unlock_bh(&bp->phy_lock);
1705 msleep(50);
1706 spin_lock_bh(&bp->phy_lock);
1707
Michael Chanca58c3a2007-05-03 13:22:52 -07001708 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1709 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001710 }
1711
Michael Chanca58c3a2007-05-03 13:22:52 -07001712 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001713
1714 /* Normally, the new speed is setup after the link has
1715 * gone down and up again. In some cases, link will not go
1716 * down so we need to set up the new speed here.
1717 */
1718 if (bmsr & BMSR_LSTATUS) {
1719 bp->line_speed = bp->req_line_speed;
1720 bp->duplex = bp->req_duplex;
1721 bnx2_resolve_flow_ctrl(bp);
1722 bnx2_set_mac_link(bp);
1723 }
Michael Chan27a005b2007-05-03 13:23:41 -07001724 } else {
1725 bnx2_resolve_flow_ctrl(bp);
1726 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001727 }
1728 return 0;
1729}
1730
1731static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001732bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001733{
1734 if (bp->loopback == MAC_LOOPBACK)
1735 return 0;
1736
1737 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001738 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001739 }
1740 else {
1741 return (bnx2_setup_copper_phy(bp));
1742 }
1743}
1744
1745static int
Michael Chan27a005b2007-05-03 13:23:41 -07001746bnx2_init_5709s_phy(struct bnx2 *bp)
1747{
1748 u32 val;
1749
1750 bp->mii_bmcr = MII_BMCR + 0x10;
1751 bp->mii_bmsr = MII_BMSR + 0x10;
1752 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1753 bp->mii_adv = MII_ADVERTISE + 0x10;
1754 bp->mii_lpa = MII_LPA + 0x10;
1755 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1756
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1758 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1759
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761 bnx2_reset_phy(bp);
1762
1763 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1764
1765 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1766 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1767 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1768 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1769
1770 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1771 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1772 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1773 val |= BCM5708S_UP1_2G5;
1774 else
1775 val &= ~BCM5708S_UP1_2G5;
1776 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1777
1778 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1779 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1780 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1781 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1782
1783 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1784
1785 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1786 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1787 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1788
1789 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1790
1791 return 0;
1792}
1793
1794static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001795bnx2_init_5708s_phy(struct bnx2 *bp)
1796{
1797 u32 val;
1798
Michael Chan27a005b2007-05-03 13:23:41 -07001799 bnx2_reset_phy(bp);
1800
1801 bp->mii_up1 = BCM5708S_UP1;
1802
Michael Chan5b0c76a2005-11-04 08:45:49 -08001803 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1804 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1805 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1806
1807 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1808 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1809 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1810
1811 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1812 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1813 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1814
1815 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1816 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1817 val |= BCM5708S_UP1_2G5;
1818 bnx2_write_phy(bp, BCM5708S_UP1, val);
1819 }
1820
1821 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001822 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1823 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001824 /* increase tx signal amplitude */
1825 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1826 BCM5708S_BLK_ADDR_TX_MISC);
1827 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1828 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1829 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1830 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1831 }
1832
Michael Chane3648b32005-11-04 08:51:21 -08001833 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001834 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1835
1836 if (val) {
1837 u32 is_backplane;
1838
Michael Chane3648b32005-11-04 08:51:21 -08001839 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001840 BNX2_SHARED_HW_CFG_CONFIG);
1841 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1842 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1843 BCM5708S_BLK_ADDR_TX_MISC);
1844 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1845 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1846 BCM5708S_BLK_ADDR_DIG);
1847 }
1848 }
1849 return 0;
1850}
1851
1852static int
1853bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001854{
Michael Chan27a005b2007-05-03 13:23:41 -07001855 bnx2_reset_phy(bp);
1856
Michael Chanb6016b72005-05-26 13:03:09 -07001857 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1858
Michael Chan59b47d82006-11-19 14:10:45 -08001859 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1860 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001861
1862 if (bp->dev->mtu > 1500) {
1863 u32 val;
1864
1865 /* Set extended packet length bit */
1866 bnx2_write_phy(bp, 0x18, 0x7);
1867 bnx2_read_phy(bp, 0x18, &val);
1868 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1869
1870 bnx2_write_phy(bp, 0x1c, 0x6c00);
1871 bnx2_read_phy(bp, 0x1c, &val);
1872 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1873 }
1874 else {
1875 u32 val;
1876
1877 bnx2_write_phy(bp, 0x18, 0x7);
1878 bnx2_read_phy(bp, 0x18, &val);
1879 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1880
1881 bnx2_write_phy(bp, 0x1c, 0x6c00);
1882 bnx2_read_phy(bp, 0x1c, &val);
1883 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1884 }
1885
1886 return 0;
1887}
1888
1889static int
1890bnx2_init_copper_phy(struct bnx2 *bp)
1891{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001892 u32 val;
1893
Michael Chan27a005b2007-05-03 13:23:41 -07001894 bnx2_reset_phy(bp);
1895
Michael Chanb6016b72005-05-26 13:03:09 -07001896 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1897 bnx2_write_phy(bp, 0x18, 0x0c00);
1898 bnx2_write_phy(bp, 0x17, 0x000a);
1899 bnx2_write_phy(bp, 0x15, 0x310b);
1900 bnx2_write_phy(bp, 0x17, 0x201f);
1901 bnx2_write_phy(bp, 0x15, 0x9506);
1902 bnx2_write_phy(bp, 0x17, 0x401f);
1903 bnx2_write_phy(bp, 0x15, 0x14e2);
1904 bnx2_write_phy(bp, 0x18, 0x0400);
1905 }
1906
Michael Chanb659f442007-02-02 00:46:35 -08001907 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1908 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1909 MII_BNX2_DSP_EXPAND_REG | 0x8);
1910 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1911 val &= ~(1 << 8);
1912 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1913 }
1914
Michael Chanb6016b72005-05-26 13:03:09 -07001915 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001916 /* Set extended packet length bit */
1917 bnx2_write_phy(bp, 0x18, 0x7);
1918 bnx2_read_phy(bp, 0x18, &val);
1919 bnx2_write_phy(bp, 0x18, val | 0x4000);
1920
1921 bnx2_read_phy(bp, 0x10, &val);
1922 bnx2_write_phy(bp, 0x10, val | 0x1);
1923 }
1924 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001925 bnx2_write_phy(bp, 0x18, 0x7);
1926 bnx2_read_phy(bp, 0x18, &val);
1927 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1928
1929 bnx2_read_phy(bp, 0x10, &val);
1930 bnx2_write_phy(bp, 0x10, val & ~0x1);
1931 }
1932
Michael Chan5b0c76a2005-11-04 08:45:49 -08001933 /* ethernet@wirespeed */
1934 bnx2_write_phy(bp, 0x18, 0x7007);
1935 bnx2_read_phy(bp, 0x18, &val);
1936 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001937 return 0;
1938}
1939
1940
1941static int
1942bnx2_init_phy(struct bnx2 *bp)
1943{
1944 u32 val;
1945 int rc = 0;
1946
1947 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1948 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1949
Michael Chanca58c3a2007-05-03 13:22:52 -07001950 bp->mii_bmcr = MII_BMCR;
1951 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001952 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001953 bp->mii_adv = MII_ADVERTISE;
1954 bp->mii_lpa = MII_LPA;
1955
Michael Chanb6016b72005-05-26 13:03:09 -07001956 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1957
Michael Chan0d8a6572007-07-07 22:49:43 -07001958 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1959 goto setup_phy;
1960
Michael Chanb6016b72005-05-26 13:03:09 -07001961 bnx2_read_phy(bp, MII_PHYSID1, &val);
1962 bp->phy_id = val << 16;
1963 bnx2_read_phy(bp, MII_PHYSID2, &val);
1964 bp->phy_id |= val & 0xffff;
1965
1966 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001967 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1968 rc = bnx2_init_5706s_phy(bp);
1969 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1970 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001971 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1972 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001973 }
1974 else {
1975 rc = bnx2_init_copper_phy(bp);
1976 }
1977
Michael Chan0d8a6572007-07-07 22:49:43 -07001978setup_phy:
1979 if (!rc)
1980 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001981
1982 return rc;
1983}
1984
1985static int
1986bnx2_set_mac_loopback(struct bnx2 *bp)
1987{
1988 u32 mac_mode;
1989
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994 bp->link_up = 1;
1995 return 0;
1996}
1997
Michael Chanbc5a0692006-01-23 16:13:22 -08001998static int bnx2_test_link(struct bnx2 *);
1999
2000static int
2001bnx2_set_phy_loopback(struct bnx2 *bp)
2002{
2003 u32 mac_mode;
2004 int rc, i;
2005
2006 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07002007 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08002008 BMCR_SPEED1000);
2009 spin_unlock_bh(&bp->phy_lock);
2010 if (rc)
2011 return rc;
2012
2013 for (i = 0; i < 10; i++) {
2014 if (bnx2_test_link(bp) == 0)
2015 break;
Michael Chan80be4432006-11-19 14:07:28 -08002016 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08002017 }
2018
2019 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2020 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2021 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08002022 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08002023
2024 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2025 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2026 bp->link_up = 1;
2027 return 0;
2028}
2029
Michael Chanb6016b72005-05-26 13:03:09 -07002030static int
Michael Chanb090ae22006-01-23 16:07:10 -08002031bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07002032{
2033 int i;
2034 u32 val;
2035
Michael Chanb6016b72005-05-26 13:03:09 -07002036 bp->fw_wr_seq++;
2037 msg_data |= bp->fw_wr_seq;
2038
Michael Chane3648b32005-11-04 08:51:21 -08002039 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002040
2041 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08002042 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2043 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07002044
Michael Chane3648b32005-11-04 08:51:21 -08002045 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07002046
2047 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2048 break;
2049 }
Michael Chanb090ae22006-01-23 16:07:10 -08002050 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2051 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002052
2053 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08002054 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2055 if (!silent)
2056 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2057 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002058
2059 msg_data &= ~BNX2_DRV_MSG_CODE;
2060 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2061
Michael Chane3648b32005-11-04 08:51:21 -08002062 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002063
Michael Chanb6016b72005-05-26 13:03:09 -07002064 return -EBUSY;
2065 }
2066
Michael Chanb090ae22006-01-23 16:07:10 -08002067 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2068 return -EIO;
2069
Michael Chanb6016b72005-05-26 13:03:09 -07002070 return 0;
2071}
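/*
 * Illustrative call (a sketch, not copied from this file): callers OR a
 * BNX2_DRV_MSG_CODE_* command with a BNX2_DRV_MSG_DATA_* wait stage and
 * let this routine append the sequence number, e.g.
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | BNX2_DRV_MSG_CODE_RESET, 1);
 *
 * The constant names are assumed from the masks used above; the actual
 * values live in bnx2.h.
 */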
2072
Michael Chan59b47d82006-11-19 14:10:45 -08002073static int
2074bnx2_init_5709_context(struct bnx2 *bp)
2075{
2076 int i, ret = 0;
2077 u32 val;
2078
2079 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2080 val |= (BCM_PAGE_BITS - 8) << 16;
2081 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002082 for (i = 0; i < 10; i++) {
2083 val = REG_RD(bp, BNX2_CTX_COMMAND);
2084 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2085 break;
2086 udelay(2);
2087 }
2088 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2089 return -EBUSY;
2090
Michael Chan59b47d82006-11-19 14:10:45 -08002091 for (i = 0; i < bp->ctx_pages; i++) {
2092 int j;
2093
2094 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2095 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2096 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2097 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2098 (u64) bp->ctx_blk_mapping[i] >> 32);
2099 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2100 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2101 for (j = 0; j < 10; j++) {
2102
2103 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2104 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2105 break;
2106 udelay(5);
2107 }
2108 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2109 ret = -EBUSY;
2110 break;
2111 }
2112 }
2113 return ret;
2114}
2115
Michael Chanb6016b72005-05-26 13:03:09 -07002116static void
2117bnx2_init_context(struct bnx2 *bp)
2118{
2119 u32 vcid;
2120
2121 vcid = 96;
2122 while (vcid) {
2123 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002124 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002125
2126 vcid--;
2127
2128 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2129 u32 new_vcid;
2130
2131 vcid_addr = GET_PCID_ADDR(vcid);
2132 if (vcid & 0x8) {
2133 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2134 }
2135 else {
2136 new_vcid = vcid;
2137 }
2138 pcid_addr = GET_PCID_ADDR(new_vcid);
2139 }
2140 else {
2141 vcid_addr = GET_CID_ADDR(vcid);
2142 pcid_addr = vcid_addr;
2143 }
2144
Michael Chan7947b202007-06-04 21:17:10 -07002145 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2146 vcid_addr += (i << PHY_CTX_SHIFT);
2147 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002148
Michael Chan5d5d0012007-12-12 11:17:43 -08002149 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
Michael Chan7947b202007-06-04 21:17:10 -07002150 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2151
2152 /* Zero out the context. */
2153 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
Michael Chan5d5d0012007-12-12 11:17:43 -08002154 CTX_WR(bp, vcid_addr, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002155 }
Michael Chanb6016b72005-05-26 13:03:09 -07002156 }
2157}
2158
2159static int
2160bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2161{
2162 u16 *good_mbuf;
2163 u32 good_mbuf_cnt;
2164 u32 val;
2165
2166 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2167 if (good_mbuf == NULL) {
2168 printk(KERN_ERR PFX "Failed to allocate memory in "
2169 "bnx2_alloc_bad_rbuf\n");
2170 return -ENOMEM;
2171 }
2172
2173 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2174 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2175
2176 good_mbuf_cnt = 0;
2177
2178 /* Allocate a bunch of mbufs and save the good ones in an array. */
2179 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2180 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2181 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2182
2183 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2184
2185 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2186
2187 /* The addresses with Bit 9 set are bad memory blocks. */
2188 if (!(val & (1 << 9))) {
2189 good_mbuf[good_mbuf_cnt] = (u16) val;
2190 good_mbuf_cnt++;
2191 }
2192
2193 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2194 }
2195
2196 /* Free the good ones back to the mbuf pool thus discarding
2197 * all the bad ones. */
2198 while (good_mbuf_cnt) {
2199 good_mbuf_cnt--;
2200
2201 val = good_mbuf[good_mbuf_cnt];
2202 val = (val << 9) | val | 1;
2203
2204 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2205 }
2206 kfree(good_mbuf);
2207 return 0;
2208}
2209
2210static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002211bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002212{
2213 u32 val;
2214 u8 *mac_addr = bp->dev->dev_addr;
2215
2216 val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002220 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002221 (mac_addr[4] << 8) | mac_addr[5];
2222
2223 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224}
2225
2226static inline int
Michael Chan47bf4242007-12-12 11:19:12 -08002227bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2228{
2229 dma_addr_t mapping;
2230 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231 struct rx_bd *rxbd =
2232 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233 struct page *page = alloc_page(GFP_ATOMIC);
2234
2235 if (!page)
2236 return -ENOMEM;
2237 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238 PCI_DMA_FROMDEVICE);
2239 rx_pg->page = page;
2240 pci_unmap_addr_set(rx_pg, mapping, mapping);
2241 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243 return 0;
2244}
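/*
 * Page-ring note (a reading of the code, not an authoritative design
 * statement): rx_pg_ring entries supply the backing pages for jumbo
 * frames that do not fit in a regular rx buffer; bnx2_rx_skb() below
 * attaches them to the skb as page fragments and refills the slots with
 * bnx2_alloc_rx_page().
 */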
2245
2246static void
2247bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248{
2249 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250 struct page *page = rx_pg->page;
2251
2252 if (!page)
2253 return;
2254
2255 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256 PCI_DMA_FROMDEVICE);
2257
2258 __free_page(page);
2259 rx_pg->page = NULL;
2260}
2261
2262static inline int
Michael Chanb6016b72005-05-26 13:03:09 -07002263bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2264{
2265 struct sk_buff *skb;
2266 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2267 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002268 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002269 unsigned long align;
2270
Michael Chan932f3772006-08-15 01:39:36 -07002271 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002272 if (skb == NULL) {
2273 return -ENOMEM;
2274 }
2275
Michael Chan59b47d82006-11-19 14:10:45 -08002276 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2277 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002278
Michael Chanb6016b72005-05-26 13:03:09 -07002279 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2280 PCI_DMA_FROMDEVICE);
2281
2282 rx_buf->skb = skb;
2283 pci_unmap_addr_set(rx_buf, mapping, mapping);
2284
2285 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2286 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2287
2288 bp->rx_prod_bseq += bp->rx_buf_use_size;
2289
2290 return 0;
2291}
2292
Michael Chanda3e4fb2007-05-03 13:24:23 -07002293static int
2294bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2295{
2296 struct status_block *sblk = bp->status_blk;
2297 u32 new_link_state, old_link_state;
2298 int is_set = 1;
2299
2300 new_link_state = sblk->status_attn_bits & event;
2301 old_link_state = sblk->status_attn_bits_ack & event;
2302 if (new_link_state != old_link_state) {
2303 if (new_link_state)
2304 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305 else
2306 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307 } else
2308 is_set = 0;
2309
2310 return is_set;
2311}
2312
Michael Chanb6016b72005-05-26 13:03:09 -07002313static void
2314bnx2_phy_int(struct bnx2 *bp)
2315{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002316 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2317 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002318 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002319 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002320 }
Michael Chan0d8a6572007-07-07 22:49:43 -07002321 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2322 bnx2_set_remote_link(bp);
2323
Michael Chanb6016b72005-05-26 13:03:09 -07002324}
2325
Michael Chanead72702007-12-20 19:55:39 -08002326static inline u16
2327bnx2_get_hw_tx_cons(struct bnx2 *bp)
2328{
2329 u16 cons;
2330
2331 cons = bp->status_blk->status_tx_quick_consumer_index0;
2332
2333 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2334 cons++;
2335 return cons;
2336}
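/*
 * Why the "cons++" above (inferred from the ring layout implied by
 * MAX_TX_DESC_CNT): the last descriptor of each ring page is a chain
 * pointer to the next page, so a hardware consumer index whose page
 * offset equals MAX_TX_DESC_CNT never refers to a real packet BD and is
 * skipped. bnx2_get_hw_rx_cons() below follows the same convention.
 */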
2337
Michael Chanb6016b72005-05-26 13:03:09 -07002338static void
2339bnx2_tx_int(struct bnx2 *bp)
2340{
2341 u16 hw_cons, sw_cons, sw_ring_cons;
2342 int tx_free_bd = 0;
2343
Michael Chanead72702007-12-20 19:55:39 -08002344 hw_cons = bnx2_get_hw_tx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002345 sw_cons = bp->tx_cons;
2346
2347 while (sw_cons != hw_cons) {
2348 struct sw_bd *tx_buf;
2349 struct sk_buff *skb;
2350 int i, last;
2351
2352 sw_ring_cons = TX_RING_IDX(sw_cons);
2353
2354 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2355 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002356
Michael Chanb6016b72005-05-26 13:03:09 -07002357 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002358 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002359 u16 last_idx, last_ring_idx;
2360
2361 last_idx = sw_cons +
2362 skb_shinfo(skb)->nr_frags + 1;
2363 last_ring_idx = sw_ring_cons +
2364 skb_shinfo(skb)->nr_frags + 1;
2365 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2366 last_idx++;
2367 }
2368 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2369 break;
2370 }
2371 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002372
Michael Chanb6016b72005-05-26 13:03:09 -07002373 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2374 skb_headlen(skb), PCI_DMA_TODEVICE);
2375
2376 tx_buf->skb = NULL;
2377 last = skb_shinfo(skb)->nr_frags;
2378
2379 for (i = 0; i < last; i++) {
2380 sw_cons = NEXT_TX_BD(sw_cons);
2381
2382 pci_unmap_page(bp->pdev,
2383 pci_unmap_addr(
2384 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2385 mapping),
2386 skb_shinfo(skb)->frags[i].size,
2387 PCI_DMA_TODEVICE);
2388 }
2389
2390 sw_cons = NEXT_TX_BD(sw_cons);
2391
2392 tx_free_bd += last + 1;
2393
Michael Chan745720e2006-06-29 12:37:41 -07002394 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002395
Michael Chanead72702007-12-20 19:55:39 -08002396 hw_cons = bnx2_get_hw_tx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002397 }
2398
Michael Chanead72702007-12-20 19:55:39 -08002399 bp->hw_tx_cons = hw_cons;
Michael Chane89bbf12005-08-25 15:36:58 -07002400 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002401 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2402 * before checking for netif_queue_stopped(). Without the
2403 * memory barrier, there is a small possibility that bnx2_start_xmit()
2404 * will miss it and cause the queue to be stopped forever.
2405 */
2406 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002407
Michael Chan2f8af122006-08-15 01:39:10 -07002408 if (unlikely(netif_queue_stopped(bp->dev)) &&
2409 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2410 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002411 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002412 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002413 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002414 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002415 }
Michael Chanb6016b72005-05-26 13:03:09 -07002416}
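/*
 * Pairing sketch (assumed; bnx2_start_xmit() is defined elsewhere in
 * this file): the transmit path stops the queue when the ring fills and
 * then re-checks bnx2_tx_avail() behind its own barrier. Together with
 * the smp_mb() above, this closes the window in which a completion could
 * slip in between the producer's availability check and the queue stop.
 */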
2417
Michael Chan1db82f22007-12-12 11:19:35 -08002418static void
2419bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
2420{
2421 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2422 struct rx_bd *cons_bd, *prod_bd;
2423 dma_addr_t mapping;
2424 int i;
2425 u16 hw_prod = bp->rx_pg_prod, prod;
2426 u16 cons = bp->rx_pg_cons;
2427
2428 for (i = 0; i < count; i++) {
2429 prod = RX_PG_RING_IDX(hw_prod);
2430
2431 prod_rx_pg = &bp->rx_pg_ring[prod];
2432 cons_rx_pg = &bp->rx_pg_ring[cons];
2433 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2434 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2435
2436 if (i == 0 && skb) {
2437 struct page *page;
2438 struct skb_shared_info *shinfo;
2439
2440 shinfo = skb_shinfo(skb);
2441 shinfo->nr_frags--;
2442 page = shinfo->frags[shinfo->nr_frags].page;
2443 shinfo->frags[shinfo->nr_frags].page = NULL;
2444 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2445 PCI_DMA_FROMDEVICE);
2446 cons_rx_pg->page = page;
2447 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2448 dev_kfree_skb(skb);
2449 }
2450 if (prod != cons) {
2451 prod_rx_pg->page = cons_rx_pg->page;
2452 cons_rx_pg->page = NULL;
2453 pci_unmap_addr_set(prod_rx_pg, mapping,
2454 pci_unmap_addr(cons_rx_pg, mapping));
2455
2456 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2457 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2458
2459 }
2460 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2461 hw_prod = NEXT_RX_BD(hw_prod);
2462 }
2463 bp->rx_pg_prod = hw_prod;
2464 bp->rx_pg_cons = cons;
2465}
2466
Michael Chanb6016b72005-05-26 13:03:09 -07002467static inline void
2468bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2469 u16 cons, u16 prod)
2470{
Michael Chan236b6392006-03-20 17:49:02 -08002471 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2472 struct rx_bd *cons_bd, *prod_bd;
2473
2474 cons_rx_buf = &bp->rx_buf_ring[cons];
2475 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002476
2477 pci_dma_sync_single_for_device(bp->pdev,
2478 pci_unmap_addr(cons_rx_buf, mapping),
2479 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2480
Michael Chan236b6392006-03-20 17:49:02 -08002481 bp->rx_prod_bseq += bp->rx_buf_use_size;
2482
2483 prod_rx_buf->skb = skb;
2484
2485 if (cons == prod)
2486 return;
2487
Michael Chanb6016b72005-05-26 13:03:09 -07002488 pci_unmap_addr_set(prod_rx_buf, mapping,
2489 pci_unmap_addr(cons_rx_buf, mapping));
2490
Michael Chan3fdfcc22006-03-20 17:49:49 -08002491 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2492 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002493 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2494 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002495}
2496
Michael Chan85833c62007-12-12 11:17:01 -08002497static int
2498bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
Michael Chan1db82f22007-12-12 11:19:35 -08002499 unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
Michael Chan85833c62007-12-12 11:17:01 -08002500{
2501 int err;
2502 u16 prod = ring_idx & 0xffff;
2503
2504 err = bnx2_alloc_rx_skb(bp, prod);
2505 if (unlikely(err)) {
2506 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
Michael Chan1db82f22007-12-12 11:19:35 -08002507 if (hdr_len) {
2508 unsigned int raw_len = len + 4;
2509 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2510
2511 bnx2_reuse_rx_skb_pages(bp, NULL, pages);
2512 }
Michael Chan85833c62007-12-12 11:17:01 -08002513 return err;
2514 }
2515
2516 skb_reserve(skb, bp->rx_offset);
2517 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2518 PCI_DMA_FROMDEVICE);
2519
Michael Chan1db82f22007-12-12 11:19:35 -08002520 if (hdr_len == 0) {
2521 skb_put(skb, len);
2522 return 0;
2523 } else {
2524 unsigned int i, frag_len, frag_size, pages;
2525 struct sw_pg *rx_pg;
2526 u16 pg_cons = bp->rx_pg_cons;
2527 u16 pg_prod = bp->rx_pg_prod;
2528
2529 frag_size = len + 4 - hdr_len;
2530 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2531 skb_put(skb, hdr_len);
2532
2533 for (i = 0; i < pages; i++) {
2534 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2535 if (unlikely(frag_len <= 4)) {
2536 unsigned int tail = 4 - frag_len;
2537
2538 bp->rx_pg_cons = pg_cons;
2539 bp->rx_pg_prod = pg_prod;
2540 bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
2541 skb->len -= tail;
2542 if (i == 0) {
2543 skb->tail -= tail;
2544 } else {
2545 skb_frag_t *frag =
2546 &skb_shinfo(skb)->frags[i - 1];
2547 frag->size -= tail;
2548 skb->data_len -= tail;
2549 skb->truesize -= tail;
2550 }
2551 return 0;
2552 }
2553 rx_pg = &bp->rx_pg_ring[pg_cons];
2554
2555 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2556 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2557
2558 if (i == pages - 1)
2559 frag_len -= 4;
2560
2561 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2562 rx_pg->page = NULL;
2563
2564 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2565 if (unlikely(err)) {
2566 bp->rx_pg_cons = pg_cons;
2567 bp->rx_pg_prod = pg_prod;
2568 bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
2569 return err;
2570 }
2571
2572 frag_size -= frag_len;
2573 skb->data_len += frag_len;
2574 skb->truesize += frag_len;
2575 skb->len += frag_len;
2576
2577 pg_prod = NEXT_RX_BD(pg_prod);
2578 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2579 }
2580 bp->rx_pg_prod = pg_prod;
2581 bp->rx_pg_cons = pg_cons;
2582 }
Michael Chan85833c62007-12-12 11:17:01 -08002583 return 0;
2584}
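/*
 * Jumbo receive layout handled above (derived from the code): the chip
 * places the first hdr_len bytes in the normal rx buffer and the rest of
 * the frame in page-ring pages that become skb fragments. The length
 * reported by the chip includes the 4-byte Ethernet FCS, hence the
 * "len + 4" when sizing the fragments and the trim of the final
 * fragment's last 4 bytes.
 */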
2585
Michael Chanc09c2622007-12-10 17:18:37 -08002586static inline u16
2587bnx2_get_hw_rx_cons(struct bnx2 *bp)
2588{
2589 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2590
2591 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2592 cons++;
2593 return cons;
2594}
2595
Michael Chanb6016b72005-05-26 13:03:09 -07002596static int
2597bnx2_rx_int(struct bnx2 *bp, int budget)
2598{
2599 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2600 struct l2_fhdr *rx_hdr;
Michael Chan1db82f22007-12-12 11:19:35 -08002601 int rx_pkt = 0, pg_ring_used = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002602
Michael Chanc09c2622007-12-10 17:18:37 -08002603 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002604 sw_cons = bp->rx_cons;
2605 sw_prod = bp->rx_prod;
2606
2607 /* Memory barrier necessary as speculative reads of the rx
2608 * buffer can be ahead of the index in the status block
2609 */
2610 rmb();
2611 while (sw_cons != hw_cons) {
Michael Chan1db82f22007-12-12 11:19:35 -08002612 unsigned int len, hdr_len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002613 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002614 struct sw_bd *rx_buf;
2615 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002616 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002617
2618 sw_ring_cons = RX_RING_IDX(sw_cons);
2619 sw_ring_prod = RX_RING_IDX(sw_prod);
2620
2621 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2622 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002623
2624 rx_buf->skb = NULL;
2625
2626 dma_addr = pci_unmap_addr(rx_buf, mapping);
2627
2628 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002629 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2630
2631 rx_hdr = (struct l2_fhdr *) skb->data;
Michael Chan1db82f22007-12-12 11:19:35 -08002632 len = rx_hdr->l2_fhdr_pkt_len;
Michael Chanb6016b72005-05-26 13:03:09 -07002633
Michael Chanade2bfe2006-01-23 16:09:51 -08002634 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002635 (L2_FHDR_ERRORS_BAD_CRC |
2636 L2_FHDR_ERRORS_PHY_DECODE |
2637 L2_FHDR_ERRORS_ALIGNMENT |
2638 L2_FHDR_ERRORS_TOO_SHORT |
2639 L2_FHDR_ERRORS_GIANT_FRAME)) {
2640
Michael Chan85833c62007-12-12 11:17:01 -08002641 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2642 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002643 }
Michael Chan1db82f22007-12-12 11:19:35 -08002644 hdr_len = 0;
2645 if (status & L2_FHDR_STATUS_SPLIT) {
 2646 hdr_len = rx_hdr->l2_fhdr_ip_xsum; /* hw reports the split header length in this field */
2647 pg_ring_used = 1;
2648 } else if (len > bp->rx_jumbo_thresh) {
2649 hdr_len = bp->rx_jumbo_thresh;
2650 pg_ring_used = 1;
2651 }
2652
2653 len -= 4;
Michael Chanb6016b72005-05-26 13:03:09 -07002654
Michael Chan5d5d0012007-12-12 11:17:43 -08002655 if (len <= bp->rx_copy_thresh) {
Michael Chanb6016b72005-05-26 13:03:09 -07002656 struct sk_buff *new_skb;
2657
Michael Chan932f3772006-08-15 01:39:36 -07002658 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chan85833c62007-12-12 11:17:01 -08002659 if (new_skb == NULL) {
2660 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2661 sw_ring_prod);
2662 goto next_rx;
2663 }
Michael Chanb6016b72005-05-26 13:03:09 -07002664
2665 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002666 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2667 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002668 skb_reserve(new_skb, 2);
2669 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002670
2671 bnx2_reuse_rx_skb(bp, skb,
2672 sw_ring_cons, sw_ring_prod);
2673
2674 skb = new_skb;
Michael Chan1db82f22007-12-12 11:19:35 -08002675 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
Michael Chan85833c62007-12-12 11:17:01 -08002676 (sw_ring_cons << 16) | sw_ring_prod)))
Michael Chanb6016b72005-05-26 13:03:09 -07002677 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002678
2679 skb->protocol = eth_type_trans(skb, bp->dev);
2680
2681 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002682 (ntohs(skb->protocol) != 0x8100)) { /* 0x8100 == ETH_P_8021Q (VLAN) */
Michael Chanb6016b72005-05-26 13:03:09 -07002683
Michael Chan745720e2006-06-29 12:37:41 -07002684 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002685 goto next_rx;
2686
2687 }
2688
Michael Chanb6016b72005-05-26 13:03:09 -07002689 skb->ip_summed = CHECKSUM_NONE;
2690 if (bp->rx_csum &&
2691 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2692 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2693
Michael Chanade2bfe2006-01-23 16:09:51 -08002694 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2695 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002696 skb->ip_summed = CHECKSUM_UNNECESSARY;
2697 }
2698
2699#ifdef BCM_VLAN
2700 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2701 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2702 rx_hdr->l2_fhdr_vlan_tag);
2703 }
2704 else
2705#endif
2706 netif_receive_skb(skb);
2707
2708 bp->dev->last_rx = jiffies;
2709 rx_pkt++;
2710
2711next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002712 sw_cons = NEXT_RX_BD(sw_cons);
2713 sw_prod = NEXT_RX_BD(sw_prod);
2714
 2715 if (rx_pkt == budget)
2716 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002717
2718 /* Refresh hw_cons to see if there is new work */
2719 if (sw_cons == hw_cons) {
Michael Chanc09c2622007-12-10 17:18:37 -08002720 hw_cons = bnx2_get_hw_rx_cons(bp);
Michael Chanf4e418f2005-11-04 08:53:48 -08002721 rmb();
2722 }
Michael Chanb6016b72005-05-26 13:03:09 -07002723 }
2724 bp->rx_cons = sw_cons;
2725 bp->rx_prod = sw_prod;
2726
Michael Chan1db82f22007-12-12 11:19:35 -08002727 if (pg_ring_used)
2728 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2729 bp->rx_pg_prod);
2730
Michael Chanb6016b72005-05-26 13:03:09 -07002731 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2732
2733 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2734
2735 mmiowb();
2736
2737 return rx_pkt;
2738
2739}
2740
2741/* MSI ISR - The only difference between this and the INTx ISR
2742 * is that the MSI interrupt is always serviced.
2743 */
2744static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002745bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002746{
2747 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002748 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002749
Michael Chanc921e4c2005-09-08 13:15:32 -07002750 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002751 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2752 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2753 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2754
2755 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002756 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2757 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002758
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002759 netif_rx_schedule(dev, &bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07002760
Michael Chan73eef4c2005-08-25 15:39:15 -07002761 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002762}
2763
2764static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002765bnx2_msi_1shot(int irq, void *dev_instance)
2766{
2767 struct net_device *dev = dev_instance;
2768 struct bnx2 *bp = netdev_priv(dev);
2769
2770 prefetch(bp->status_blk);
2771
2772 /* Return here if interrupt is disabled. */
2773 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2774 return IRQ_HANDLED;
2775
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002776 netif_rx_schedule(dev, &bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07002777
2778 return IRQ_HANDLED;
2779}
2780
2781static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002782bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002783{
2784 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002785 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002786 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002787
2788 /* When using INTx, it is possible for the interrupt to arrive
 2789 * at the CPU before the status block write posted prior to the
 2790 * interrupt has reached memory. Reading a register will flush the status block.
2791 * When using MSI, the MSI message will always complete after
2792 * the status block write.
2793 */
Michael Chanb8a7ce72007-07-07 22:51:03 -07002794 if ((sblk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002795 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2796 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002797 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002798
2799 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2800 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2801 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2802
Michael Chanb8a7ce72007-07-07 22:51:03 -07002803 /* Read back to deassert IRQ immediately to avoid too many
2804 * spurious interrupts.
2805 */
2806 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2807
Michael Chanb6016b72005-05-26 13:03:09 -07002808 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002809 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2810 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002811
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002812 if (netif_rx_schedule_prep(dev, &bp->napi)) {
Michael Chanb8a7ce72007-07-07 22:51:03 -07002813 bp->last_status_idx = sblk->status_idx;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002814 __netif_rx_schedule(dev, &bp->napi);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002815 }
Michael Chanb6016b72005-05-26 13:03:09 -07002816
Michael Chan73eef4c2005-08-25 15:39:15 -07002817 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002818}
2819
Michael Chan0d8a6572007-07-07 22:49:43 -07002820#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2821 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002822
Michael Chanf4e418f2005-11-04 08:53:48 -08002823static inline int
2824bnx2_has_work(struct bnx2 *bp)
2825{
2826 struct status_block *sblk = bp->status_blk;
2827
Michael Chanc09c2622007-12-10 17:18:37 -08002828 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
Michael Chanead72702007-12-20 19:55:39 -08002829 (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons))
Michael Chanf4e418f2005-11-04 08:53:48 -08002830 return 1;
2831
Michael Chanda3e4fb2007-05-03 13:24:23 -07002832 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2833 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002834 return 1;
2835
2836 return 0;
2837}
2838
David S. Miller6f535762007-10-11 18:08:29 -07002839static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
Michael Chanb6016b72005-05-26 13:03:09 -07002840{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002841 struct status_block *sblk = bp->status_blk;
2842 u32 status_attn_bits = sblk->status_attn_bits;
2843 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002844
Michael Chanda3e4fb2007-05-03 13:24:23 -07002845 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2846 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002847
Michael Chanb6016b72005-05-26 13:03:09 -07002848 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002849
2850 /* This is needed to take care of transient status
2851 * during link changes.
2852 */
2853 REG_WR(bp, BNX2_HC_COMMAND,
2854 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2855 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002856 }
2857
Michael Chanead72702007-12-20 19:55:39 -08002858 if (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002859 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002860
Michael Chanc09c2622007-12-10 17:18:37 -08002861 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
David S. Miller6f535762007-10-11 18:08:29 -07002862 work_done += bnx2_rx_int(bp, budget - work_done);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002863
David S. Miller6f535762007-10-11 18:08:29 -07002864 return work_done;
2865}
Michael Chanf4e418f2005-11-04 08:53:48 -08002866
David S. Miller6f535762007-10-11 18:08:29 -07002867static int bnx2_poll(struct napi_struct *napi, int budget)
2868{
2869 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2870 int work_done = 0;
Michael Chan6dee6422007-10-12 01:40:38 -07002871 struct status_block *sblk = bp->status_blk;
David S. Miller6f535762007-10-11 18:08:29 -07002872
2873 while (1) {
2874 work_done = bnx2_poll_work(bp, work_done, budget);
2875
2876 if (unlikely(work_done >= budget))
2877 break;
2878
Michael Chan6dee6422007-10-12 01:40:38 -07002879 /* bp->last_status_idx is used below to tell the hw how
2880 * much work has been processed, so we must read it before
2881 * checking for more work.
2882 */
2883 bp->last_status_idx = sblk->status_idx;
2884 rmb();
David S. Miller6f535762007-10-11 18:08:29 -07002885 if (likely(!bnx2_has_work(bp))) {
David S. Miller6f535762007-10-11 18:08:29 -07002886 netif_rx_complete(bp->dev, napi);
2887 if (likely(bp->flags & USING_MSI_FLAG)) {
2888 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2889 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2890 bp->last_status_idx);
Michael Chan6dee6422007-10-12 01:40:38 -07002891 break;
David S. Miller6f535762007-10-11 18:08:29 -07002892 }
2893 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2894 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2895 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2896 bp->last_status_idx);
2897
Michael Chan1269a8a2006-01-23 16:11:03 -08002898 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2899 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2900 bp->last_status_idx);
David S. Miller6f535762007-10-11 18:08:29 -07002901 break;
Michael Chan1269a8a2006-01-23 16:11:03 -08002902 }
Michael Chanb6016b72005-05-26 13:03:09 -07002903 }
2904
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002905 return work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002906}
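/*
 * Completion/ack note (a reading of the writes above): on the MSI path a
 * single INT_ACK_CMD write updates the index and re-enables the
 * interrupt. On the INTx path the index is first written with MASK_INT
 * still set and then written again without it, so the line is only
 * unmasked once the new index is in place.
 */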
2907
Herbert Xu932ff272006-06-09 12:20:56 -07002908/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002909 * from set_multicast.
2910 */
2911static void
2912bnx2_set_rx_mode(struct net_device *dev)
2913{
Michael Chan972ec0d2006-01-23 16:12:43 -08002914 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002915 u32 rx_mode, sort_mode;
2916 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002917
Michael Chanc770a652005-08-25 15:38:39 -07002918 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002919
2920 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2921 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2922 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2923#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002924 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002925 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002926#else
Michael Chane29054f2006-01-23 16:06:06 -08002927 if (!(bp->flags & ASF_ENABLE_FLAG))
2928 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002929#endif
2930 if (dev->flags & IFF_PROMISC) {
2931 /* Promiscuous mode. */
2932 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002933 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2934 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002935 }
2936 else if (dev->flags & IFF_ALLMULTI) {
2937 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2938 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2939 0xffffffff);
2940 }
2941 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2942 }
2943 else {
2944 /* Accept one or more multicast(s). */
2945 struct dev_mc_list *mclist;
2946 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2947 u32 regidx;
2948 u32 bit;
2949 u32 crc;
2950
2951 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2952
2953 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2954 i++, mclist = mclist->next) {
2955
2956 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2957 bit = crc & 0xff;
2958 regidx = (bit & 0xe0) >> 5;
2959 bit &= 0x1f;
2960 mc_filter[regidx] |= (1 << bit);
2961 }
2962
2963 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2964 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2965 mc_filter[i]);
2966 }
2967
2968 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2969 }
2970
2971 if (rx_mode != bp->rx_mode) {
2972 bp->rx_mode = rx_mode;
2973 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2974 }
2975
2976 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2977 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2978 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2979
Michael Chanc770a652005-08-25 15:38:39 -07002980 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002981}
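/*
 * Multicast filter sketch (derived from the loop above): for each
 * address, crc = ether_crc_le(ETH_ALEN, addr); the low 8 bits of the CRC
 * select one of 256 hash bits, with bits 7:5 picking one of the eight
 * BNX2_EMAC_MULTICAST_HASH registers and bits 4:0 the bit within it.
 */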
2982
2983static void
2984load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2985 u32 rv2p_proc)
2986{
2987 int i;
2988 u32 val;
2989
2990
2991 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002992 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002993 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002994 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002995 rv2p_code++;
2996
2997 if (rv2p_proc == RV2P_PROC1) {
2998 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2999 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3000 }
3001 else {
3002 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3003 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3004 }
3005 }
3006
 3007 /* Reset the processor; the un-stall is done later. */
3008 if (rv2p_proc == RV2P_PROC1) {
3009 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3010 }
3011 else {
3012 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3013 }
3014}
3015
Michael Chanaf3ee512006-11-19 14:09:25 -08003016static int
Michael Chanb6016b72005-05-26 13:03:09 -07003017load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3018{
3019 u32 offset;
3020 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08003021 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003022
3023 /* Halt the CPU. */
3024 val = REG_RD_IND(bp, cpu_reg->mode);
3025 val |= cpu_reg->mode_value_halt;
3026 REG_WR_IND(bp, cpu_reg->mode, val);
3027 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3028
3029 /* Load the Text area. */
3030 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08003031 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07003032 int j;
3033
Michael Chanea1f8d52007-10-02 16:27:35 -07003034 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3035 fw->gz_text_len);
3036 if (rc < 0)
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003037 return rc;
Michael Chanea1f8d52007-10-02 16:27:35 -07003038
Michael Chanb6016b72005-05-26 13:03:09 -07003039 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003040 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07003041 }
3042 }
3043
3044 /* Load the Data area. */
3045 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3046 if (fw->data) {
3047 int j;
3048
3049 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3050 REG_WR_IND(bp, offset, fw->data[j]);
3051 }
3052 }
3053
3054 /* Load the SBSS area. */
3055 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003056 if (fw->sbss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003057 int j;
3058
3059 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003060 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003061 }
3062 }
3063
3064 /* Load the BSS area. */
3065 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003066 if (fw->bss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003067 int j;
3068
3069 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003070 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003071 }
3072 }
3073
3074 /* Load the Read-Only area. */
3075 offset = cpu_reg->spad_base +
3076 (fw->rodata_addr - cpu_reg->mips_view_base);
3077 if (fw->rodata) {
3078 int j;
3079
3080 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3081 REG_WR_IND(bp, offset, fw->rodata[j]);
3082 }
3083 }
3084
3085 /* Clear the pre-fetch instruction. */
3086 REG_WR_IND(bp, cpu_reg->inst, 0);
3087 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3088
3089 /* Start the CPU. */
3090 val = REG_RD_IND(bp, cpu_reg->mode);
3091 val &= ~cpu_reg->mode_value_halt;
3092 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3093 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08003094
3095 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003096}
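/*
 * Load sequence summary (a restatement of the function above): halt the
 * CPU, inflate the gzip'd text section into the scratchpad window, copy
 * the data and read-only sections, zero sbss/bss, clear the prefetch
 * instruction, point the program counter at fw->start_addr, then clear
 * the halt bit to start execution.
 */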
3097
Michael Chanfba9fe92006-06-12 22:21:25 -07003098static int
Michael Chanb6016b72005-05-26 13:03:09 -07003099bnx2_init_cpus(struct bnx2 *bp)
3100{
3101 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08003102 struct fw_info *fw;
Michael Chan110d0ef2007-12-12 11:18:34 -08003103 int rc, rv2p_len;
3104 void *text, *rv2p;
Michael Chanb6016b72005-05-26 13:03:09 -07003105
3106 /* Initialize the RV2P processor. */
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003107 text = vmalloc(FW_BUF_SIZE);
3108 if (!text)
3109 return -ENOMEM;
Michael Chan110d0ef2007-12-12 11:18:34 -08003110 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3111 rv2p = bnx2_xi_rv2p_proc1;
3112 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3113 } else {
3114 rv2p = bnx2_rv2p_proc1;
3115 rv2p_len = sizeof(bnx2_rv2p_proc1);
3116 }
3117 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003118 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003119 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003120
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003121 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
Michael Chanfba9fe92006-06-12 22:21:25 -07003122
Michael Chan110d0ef2007-12-12 11:18:34 -08003123 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3124 rv2p = bnx2_xi_rv2p_proc2;
3125 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3126 } else {
3127 rv2p = bnx2_rv2p_proc2;
3128 rv2p_len = sizeof(bnx2_rv2p_proc2);
3129 }
3130 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003131 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003132 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003133
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003134 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07003135
3136 /* Initialize the RX Processor. */
3137 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3138 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3139 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3140 cpu_reg.state = BNX2_RXP_CPU_STATE;
3141 cpu_reg.state_value_clear = 0xffffff;
3142 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3143 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3144 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3145 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3146 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3147 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3148 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003149
Michael Chand43584c2006-11-19 14:14:35 -08003150 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3151 fw = &bnx2_rxp_fw_09;
3152 else
3153 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003154
Michael Chanea1f8d52007-10-02 16:27:35 -07003155 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003156 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003157 if (rc)
3158 goto init_cpu_err;
3159
Michael Chanb6016b72005-05-26 13:03:09 -07003160 /* Initialize the TX Processor. */
3161 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3162 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3163 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3164 cpu_reg.state = BNX2_TXP_CPU_STATE;
3165 cpu_reg.state_value_clear = 0xffffff;
3166 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3167 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3168 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3169 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3170 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3171 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3172 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003173
Michael Chand43584c2006-11-19 14:14:35 -08003174 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3175 fw = &bnx2_txp_fw_09;
3176 else
3177 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003178
Michael Chanea1f8d52007-10-02 16:27:35 -07003179 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003180 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003181 if (rc)
3182 goto init_cpu_err;
3183
Michael Chanb6016b72005-05-26 13:03:09 -07003184 /* Initialize the TX Patch-up Processor. */
3185 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3186 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3187 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3188 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3189 cpu_reg.state_value_clear = 0xffffff;
3190 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3191 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3192 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3193 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3194 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3195 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3196 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003197
Michael Chand43584c2006-11-19 14:14:35 -08003198 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3199 fw = &bnx2_tpat_fw_09;
3200 else
3201 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003202
Michael Chanea1f8d52007-10-02 16:27:35 -07003203 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003204 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003205 if (rc)
3206 goto init_cpu_err;
3207
Michael Chanb6016b72005-05-26 13:03:09 -07003208 /* Initialize the Completion Processor. */
3209 cpu_reg.mode = BNX2_COM_CPU_MODE;
3210 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3211 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3212 cpu_reg.state = BNX2_COM_CPU_STATE;
3213 cpu_reg.state_value_clear = 0xffffff;
3214 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3215 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3216 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3217 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3218 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3219 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3220 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003221
Michael Chand43584c2006-11-19 14:14:35 -08003222 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3223 fw = &bnx2_com_fw_09;
3224 else
3225 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003226
Michael Chanea1f8d52007-10-02 16:27:35 -07003227 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003228 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003229 if (rc)
3230 goto init_cpu_err;
3231
Michael Chand43584c2006-11-19 14:14:35 -08003232 /* Initialize the Command Processor. */
3233 cpu_reg.mode = BNX2_CP_CPU_MODE;
3234 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3235 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3236 cpu_reg.state = BNX2_CP_CPU_STATE;
3237 cpu_reg.state_value_clear = 0xffffff;
3238 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3239 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3240 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3241 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3242 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3243 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3244 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003245
Michael Chan110d0ef2007-12-12 11:18:34 -08003246 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chand43584c2006-11-19 14:14:35 -08003247 fw = &bnx2_cp_fw_09;
Michael Chan110d0ef2007-12-12 11:18:34 -08003248 else
3249 fw = &bnx2_cp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003250
Michael Chan110d0ef2007-12-12 11:18:34 -08003251 fw->text = text;
3252 rc = load_cpu_fw(bp, &cpu_reg, fw);
3253
Michael Chanfba9fe92006-06-12 22:21:25 -07003254init_cpu_err:
Michael Chanea1f8d52007-10-02 16:27:35 -07003255 vfree(text);
Michael Chanfba9fe92006-06-12 22:21:25 -07003256 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003257}
3258
3259static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003260bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003261{
3262 u16 pmcsr;
3263
3264 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3265
3266 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003267 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003268 u32 val;
3269
3270 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3271 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3272 PCI_PM_CTRL_PME_STATUS);
3273
3274 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3275 /* delay required during transition out of D3hot */
3276 msleep(20);
3277
3278 val = REG_RD(bp, BNX2_EMAC_MODE);
3279 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3280 val &= ~BNX2_EMAC_MODE_MPKT;
3281 REG_WR(bp, BNX2_EMAC_MODE, val);
3282
3283 val = REG_RD(bp, BNX2_RPM_CONFIG);
3284 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3285 REG_WR(bp, BNX2_RPM_CONFIG, val);
3286 break;
3287 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003288 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003289 int i;
3290 u32 val, wol_msg;
3291
3292 if (bp->wol) {
3293 u32 advertising;
3294 u8 autoneg;
3295
3296 autoneg = bp->autoneg;
3297 advertising = bp->advertising;
3298
Michael Chan239cd342007-10-17 19:26:15 -07003299 if (bp->phy_port == PORT_TP) {
3300 bp->autoneg = AUTONEG_SPEED;
3301 bp->advertising = ADVERTISED_10baseT_Half |
3302 ADVERTISED_10baseT_Full |
3303 ADVERTISED_100baseT_Half |
3304 ADVERTISED_100baseT_Full |
3305 ADVERTISED_Autoneg;
3306 }
Michael Chanb6016b72005-05-26 13:03:09 -07003307
Michael Chan239cd342007-10-17 19:26:15 -07003308 spin_lock_bh(&bp->phy_lock);
3309 bnx2_setup_phy(bp, bp->phy_port);
3310 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003311
3312 bp->autoneg = autoneg;
3313 bp->advertising = advertising;
3314
3315 bnx2_set_mac_addr(bp);
3316
3317 val = REG_RD(bp, BNX2_EMAC_MODE);
3318
3319 /* Enable port mode. */
3320 val &= ~BNX2_EMAC_MODE_PORT;
Michael Chan239cd342007-10-17 19:26:15 -07003321 val |= BNX2_EMAC_MODE_MPKT_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003322 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003323 BNX2_EMAC_MODE_MPKT;
Michael Chan239cd342007-10-17 19:26:15 -07003324 if (bp->phy_port == PORT_TP)
3325 val |= BNX2_EMAC_MODE_PORT_MII;
3326 else {
3327 val |= BNX2_EMAC_MODE_PORT_GMII;
3328 if (bp->line_speed == SPEED_2500)
3329 val |= BNX2_EMAC_MODE_25G_MODE;
3330 }
Michael Chanb6016b72005-05-26 13:03:09 -07003331
3332 REG_WR(bp, BNX2_EMAC_MODE, val);
3333
3334 /* receive all multicast */
3335 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3336 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3337 0xffffffff);
3338 }
3339 REG_WR(bp, BNX2_EMAC_RX_MODE,
3340 BNX2_EMAC_RX_MODE_SORT_MODE);
3341
3342 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3343 BNX2_RPM_SORT_USER0_MC_EN;
3344 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3345 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3346 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3347 BNX2_RPM_SORT_USER0_ENA);
3348
3349 /* Need to enable EMAC and RPM for WOL. */
3350 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3351 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3352 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3353 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3354
3355 val = REG_RD(bp, BNX2_RPM_CONFIG);
3356 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3357 REG_WR(bp, BNX2_RPM_CONFIG, val);
3358
3359 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3360 }
3361 else {
3362 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3363 }
3364
Michael Chandda1e392006-01-23 16:08:14 -08003365 if (!(bp->flags & NO_WOL_FLAG))
3366 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003367
3368 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3369 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3370 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3371
3372 if (bp->wol)
3373 pmcsr |= 3;
3374 }
3375 else {
3376 pmcsr |= 3;
3377 }
3378 if (bp->wol) {
3379 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3380 }
3381 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3382 pmcsr);
3383
3384 /* No more memory access after this point until
3385		 * the device is brought back to D0.
3386 */
3387 udelay(50);
3388 break;
3389 }
3390 default:
3391 return -EINVAL;
3392 }
3393 return 0;
3394}
3395
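/*
 * Illustrative sketch, not part of the driver: the PMCSR arithmetic used
 * by bnx2_set_power_state() above.  The constants below mirror the
 * standard PCI power-management control register layout (low two bits
 * select D0..D3hot, bit 8 arms PME, bit 15 is the write-one-to-clear PME
 * status bit); they are defined locally so the sketch stands alone.
 */
#include <stdio.h>
#include <stdint.h>

#define PM_CTRL_STATE_MASK	0x0003
#define PM_CTRL_PME_ENABLE	0x0100
#define PM_CTRL_PME_STATUS	0x8000

int main(void)
{
	uint16_t pmcsr = 0x0000;		/* pretend the device is in D0 */

	/* Enter D3hot with wake-on-LAN armed. */
	pmcsr = (pmcsr & ~PM_CTRL_STATE_MASK) | 3 | PM_CTRL_PME_ENABLE;
	printf("D3hot + PME: 0x%04x\n", pmcsr);		/* prints 0x0103 */

	/* Return to D0 and clear any pending PME status. */
	pmcsr = (pmcsr & ~PM_CTRL_STATE_MASK) | PM_CTRL_PME_STATUS;
	printf("back to D0 : 0x%04x\n", pmcsr);		/* prints 0x8100 */
	return 0;
}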
3396static int
3397bnx2_acquire_nvram_lock(struct bnx2 *bp)
3398{
3399 u32 val;
3400 int j;
3401
3402 /* Request access to the flash interface. */
3403 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3404 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3405 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3406 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3407 break;
3408
3409 udelay(5);
3410 }
3411
3412 if (j >= NVRAM_TIMEOUT_COUNT)
3413 return -EBUSY;
3414
3415 return 0;
3416}
3417
3418static int
3419bnx2_release_nvram_lock(struct bnx2 *bp)
3420{
3421 int j;
3422 u32 val;
3423
3424 /* Relinquish nvram interface. */
3425 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3426
3427 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3428 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3429 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3430 break;
3431
3432 udelay(5);
3433 }
3434
3435 if (j >= NVRAM_TIMEOUT_COUNT)
3436 return -EBUSY;
3437
3438 return 0;
3439}
3440
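/*
 * Illustrative sketch, not part of the driver: the NVRAM helpers above
 * and below all share one shape -- poll a status bit up to a fixed count
 * with a short delay between reads, and return -EBUSY if it never flips.
 * poll_bit() is a hypothetical user-space analogue that takes a callback
 * in place of REG_RD(); the timeout and errno values are local stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_TIMEOUT_COUNT	30000
#define SKETCH_EBUSY		16

static int poll_bit(uint32_t (*read_status)(void *), void *ctx, uint32_t bit)
{
	int j;

	for (j = 0; j < SKETCH_TIMEOUT_COUNT; j++) {
		if (read_status(ctx) & bit)
			return 0;		/* bit set: operation done */
		/* the driver would udelay(5) here */
	}
	return -SKETCH_EBUSY;			/* timed out, mirrors -EBUSY */
}

static uint32_t fake_status(void *ctx)
{
	int *countdown = ctx;

	return (--(*countdown) <= 0) ? 0x1 : 0x0;	/* done after N polls */
}

int main(void)
{
	int countdown = 5;

	printf("poll result: %d\n", poll_bit(fake_status, &countdown, 0x1));
	return 0;
}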
3441
3442static int
3443bnx2_enable_nvram_write(struct bnx2 *bp)
3444{
3445 u32 val;
3446
3447 val = REG_RD(bp, BNX2_MISC_CFG);
3448 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3449
Michael Chane30372c2007-07-16 18:26:23 -07003450 if (bp->flash_info->flags & BNX2_NV_WREN) {
Michael Chanb6016b72005-05-26 13:03:09 -07003451 int j;
3452
3453 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3454 REG_WR(bp, BNX2_NVM_COMMAND,
3455 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3456
3457 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3458 udelay(5);
3459
3460 val = REG_RD(bp, BNX2_NVM_COMMAND);
3461 if (val & BNX2_NVM_COMMAND_DONE)
3462 break;
3463 }
3464
3465 if (j >= NVRAM_TIMEOUT_COUNT)
3466 return -EBUSY;
3467 }
3468 return 0;
3469}
3470
3471static void
3472bnx2_disable_nvram_write(struct bnx2 *bp)
3473{
3474 u32 val;
3475
3476 val = REG_RD(bp, BNX2_MISC_CFG);
3477 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3478}
3479
3480
3481static void
3482bnx2_enable_nvram_access(struct bnx2 *bp)
3483{
3484 u32 val;
3485
3486 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3487 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003488 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003489 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3490}
3491
3492static void
3493bnx2_disable_nvram_access(struct bnx2 *bp)
3494{
3495 u32 val;
3496
3497 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3498 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003499 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003500 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3501 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3502}
3503
3504static int
3505bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3506{
3507 u32 cmd;
3508 int j;
3509
Michael Chane30372c2007-07-16 18:26:23 -07003510 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
Michael Chanb6016b72005-05-26 13:03:09 -07003511 /* Buffered flash, no erase needed */
3512 return 0;
3513
3514 /* Build an erase command */
3515 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3516 BNX2_NVM_COMMAND_DOIT;
3517
3518 /* Need to clear DONE bit separately. */
3519 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3520
3521	/* Address of the NVRAM page to erase. */
3522 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3523
3524 /* Issue an erase command. */
3525 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3526
3527 /* Wait for completion. */
3528 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3529 u32 val;
3530
3531 udelay(5);
3532
3533 val = REG_RD(bp, BNX2_NVM_COMMAND);
3534 if (val & BNX2_NVM_COMMAND_DONE)
3535 break;
3536 }
3537
3538 if (j >= NVRAM_TIMEOUT_COUNT)
3539 return -EBUSY;
3540
3541 return 0;
3542}
3543
3544static int
3545bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3546{
3547 u32 cmd;
3548 int j;
3549
3550 /* Build the command word. */
3551 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3552
Michael Chane30372c2007-07-16 18:26:23 -07003553	/* Calculate the offset within a buffered flash; not needed for 5709. */
3554 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003555 offset = ((offset / bp->flash_info->page_size) <<
3556 bp->flash_info->page_bits) +
3557 (offset % bp->flash_info->page_size);
3558 }
3559
3560 /* Need to clear DONE bit separately. */
3561 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3562
3563 /* Address of the NVRAM to read from. */
3564 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3565
3566 /* Issue a read command. */
3567 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3568
3569 /* Wait for completion. */
3570 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3571 u32 val;
3572
3573 udelay(5);
3574
3575 val = REG_RD(bp, BNX2_NVM_COMMAND);
3576 if (val & BNX2_NVM_COMMAND_DONE) {
3577 val = REG_RD(bp, BNX2_NVM_READ);
3578
3579 val = be32_to_cpu(val);
3580 memcpy(ret_val, &val, 4);
3581 break;
3582 }
3583 }
3584 if (j >= NVRAM_TIMEOUT_COUNT)
3585 return -EBUSY;
3586
3587 return 0;
3588}
3589
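/*
 * Illustrative sketch, not part of the driver: the BNX2_NV_TRANSLATE case
 * above converts a linear byte offset into the page/column form that a
 * buffered flash expects.  The 264-byte page and 9 page bits used here
 * are example parameters for this sketch only.
 */
#include <stdio.h>

static unsigned nv_translate(unsigned offset, unsigned page_size,
			     unsigned page_bits)
{
	return ((offset / page_size) << page_bits) + (offset % page_size);
}

int main(void)
{
	/* Byte 300 of a 264-byte-page part is page 1, column 36. */
	printf("0x%x\n", nv_translate(300, 264, 9));	/* (1 << 9) + 36 = 0x224 */
	return 0;
}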
3590
3591static int
3592bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3593{
3594 u32 cmd, val32;
3595 int j;
3596
3597 /* Build the command word. */
3598 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3599
Michael Chane30372c2007-07-16 18:26:23 -07003600	/* Calculate the offset within a buffered flash; not needed for 5709. */
3601 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003602 offset = ((offset / bp->flash_info->page_size) <<
3603 bp->flash_info->page_bits) +
3604 (offset % bp->flash_info->page_size);
3605 }
3606
3607 /* Need to clear DONE bit separately. */
3608 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3609
3610 memcpy(&val32, val, 4);
3611 val32 = cpu_to_be32(val32);
3612
3613 /* Write the data. */
3614 REG_WR(bp, BNX2_NVM_WRITE, val32);
3615
3616 /* Address of the NVRAM to write to. */
3617 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3618
3619 /* Issue the write command. */
3620 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3621
3622 /* Wait for completion. */
3623 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3624 udelay(5);
3625
3626 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3627 break;
3628 }
3629 if (j >= NVRAM_TIMEOUT_COUNT)
3630 return -EBUSY;
3631
3632 return 0;
3633}
3634
3635static int
3636bnx2_init_nvram(struct bnx2 *bp)
3637{
3638 u32 val;
Michael Chane30372c2007-07-16 18:26:23 -07003639 int j, entry_count, rc = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003640 struct flash_spec *flash;
3641
Michael Chane30372c2007-07-16 18:26:23 -07003642 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3643 bp->flash_info = &flash_5709;
3644 goto get_flash_size;
3645 }
3646
Michael Chanb6016b72005-05-26 13:03:09 -07003647 /* Determine the selected interface. */
3648 val = REG_RD(bp, BNX2_NVM_CFG1);
3649
Denis Chengff8ac602007-09-02 18:30:18 +08003650 entry_count = ARRAY_SIZE(flash_table);
Michael Chanb6016b72005-05-26 13:03:09 -07003651
Michael Chanb6016b72005-05-26 13:03:09 -07003652 if (val & 0x40000000) {
3653
3654 /* Flash interface has been reconfigured */
3655 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003656 j++, flash++) {
3657 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3658 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003659 bp->flash_info = flash;
3660 break;
3661 }
3662 }
3663 }
3664 else {
Michael Chan37137702005-11-04 08:49:17 -08003665 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003666 /* Not yet been reconfigured */
3667
Michael Chan37137702005-11-04 08:49:17 -08003668 if (val & (1 << 23))
3669 mask = FLASH_BACKUP_STRAP_MASK;
3670 else
3671 mask = FLASH_STRAP_MASK;
3672
Michael Chanb6016b72005-05-26 13:03:09 -07003673 for (j = 0, flash = &flash_table[0]; j < entry_count;
3674 j++, flash++) {
3675
Michael Chan37137702005-11-04 08:49:17 -08003676 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003677 bp->flash_info = flash;
3678
3679 /* Request access to the flash interface. */
3680 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3681 return rc;
3682
3683 /* Enable access to flash interface */
3684 bnx2_enable_nvram_access(bp);
3685
3686 /* Reconfigure the flash interface */
3687 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3688 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3689 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3690 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3691
3692 /* Disable access to flash interface */
3693 bnx2_disable_nvram_access(bp);
3694 bnx2_release_nvram_lock(bp);
3695
3696 break;
3697 }
3698 }
3699 } /* if (val & 0x40000000) */
3700
3701 if (j == entry_count) {
3702 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003703 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003704 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003705 }
3706
Michael Chane30372c2007-07-16 18:26:23 -07003707get_flash_size:
Michael Chan1122db72006-01-23 16:11:42 -08003708 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3709 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3710 if (val)
3711 bp->flash_size = val;
3712 else
3713 bp->flash_size = bp->flash_info->total_size;
3714
Michael Chanb6016b72005-05-26 13:03:09 -07003715 return rc;
3716}
3717
3718static int
3719bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3720 int buf_size)
3721{
3722 int rc = 0;
3723 u32 cmd_flags, offset32, len32, extra;
3724
3725 if (buf_size == 0)
3726 return 0;
3727
3728 /* Request access to the flash interface. */
3729 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3730 return rc;
3731
3732 /* Enable access to flash interface */
3733 bnx2_enable_nvram_access(bp);
3734
3735 len32 = buf_size;
3736 offset32 = offset;
3737 extra = 0;
3738
3739 cmd_flags = 0;
3740
3741 if (offset32 & 3) {
3742 u8 buf[4];
3743 u32 pre_len;
3744
3745 offset32 &= ~3;
3746 pre_len = 4 - (offset & 3);
3747
3748 if (pre_len >= len32) {
3749 pre_len = len32;
3750 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3751 BNX2_NVM_COMMAND_LAST;
3752 }
3753 else {
3754 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3755 }
3756
3757 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3758
3759 if (rc)
3760 return rc;
3761
3762 memcpy(ret_buf, buf + (offset & 3), pre_len);
3763
3764 offset32 += 4;
3765 ret_buf += pre_len;
3766 len32 -= pre_len;
3767 }
3768 if (len32 & 3) {
3769 extra = 4 - (len32 & 3);
3770 len32 = (len32 + 4) & ~3;
3771 }
3772
3773 if (len32 == 4) {
3774 u8 buf[4];
3775
3776 if (cmd_flags)
3777 cmd_flags = BNX2_NVM_COMMAND_LAST;
3778 else
3779 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3780 BNX2_NVM_COMMAND_LAST;
3781
3782 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3783
3784 memcpy(ret_buf, buf, 4 - extra);
3785 }
3786 else if (len32 > 0) {
3787 u8 buf[4];
3788
3789 /* Read the first word. */
3790 if (cmd_flags)
3791 cmd_flags = 0;
3792 else
3793 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3794
3795 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3796
3797 /* Advance to the next dword. */
3798 offset32 += 4;
3799 ret_buf += 4;
3800 len32 -= 4;
3801
3802 while (len32 > 4 && rc == 0) {
3803 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3804
3805 /* Advance to the next dword. */
3806 offset32 += 4;
3807 ret_buf += 4;
3808 len32 -= 4;
3809 }
3810
3811 if (rc)
3812 return rc;
3813
3814 cmd_flags = BNX2_NVM_COMMAND_LAST;
3815 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3816
3817 memcpy(ret_buf, buf, 4 - extra);
3818 }
3819
3820 /* Disable access to flash interface */
3821 bnx2_disable_nvram_access(bp);
3822
3823 bnx2_release_nvram_lock(bp);
3824
3825 return rc;
3826}
3827
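/*
 * Illustrative sketch, not part of the driver: bnx2_nvram_read() above
 * widens an arbitrary (offset, length) request to dword boundaries, reads
 * whole dwords, and copies back only the bytes the caller asked for.
 * This stand-alone helper reports just the widened window.
 */
#include <stdio.h>

static void aligned_window(unsigned offset, unsigned len,
			   unsigned *start, unsigned *dwords, unsigned *extra)
{
	unsigned end = offset + len;

	*start = offset & ~3u;			/* round the start down */
	end = (end + 3u) & ~3u;			/* round the end up */
	*dwords = (end - *start) / 4;
	*extra = end - (offset + len);		/* trailing bytes to discard */
}

int main(void)
{
	unsigned start, dwords, extra;

	aligned_window(6, 9, &start, &dwords, &extra);	/* bytes 6..14 */
	printf("start=%u dwords=%u extra=%u\n", start, dwords, extra);
	/* start=4 dwords=3 extra=1: read dwords at 4, 8, 12, drop 1 tail byte */
	return 0;
}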
3828static int
3829bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3830 int buf_size)
3831{
3832 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003833 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003834 int rc = 0;
3835 int align_start, align_end;
3836
3837 buf = data_buf;
3838 offset32 = offset;
3839 len32 = buf_size;
3840 align_start = align_end = 0;
3841
3842 if ((align_start = (offset32 & 3))) {
3843 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003844 len32 += align_start;
3845 if (len32 < 4)
3846 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003847 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3848 return rc;
3849 }
3850
3851 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003852 align_end = 4 - (len32 & 3);
3853 len32 += align_end;
3854 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3855 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003856 }
3857
3858 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003859 align_buf = kmalloc(len32, GFP_KERNEL);
3860 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003861 return -ENOMEM;
3862 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003863 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003864 }
3865 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003866 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003867 }
Michael Chane6be7632007-01-08 19:56:13 -08003868 memcpy(align_buf + align_start, data_buf, buf_size);
3869 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003870 }
3871
Michael Chane30372c2007-07-16 18:26:23 -07003872 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanae181bc2006-05-22 16:39:20 -07003873 flash_buffer = kmalloc(264, GFP_KERNEL);
3874 if (flash_buffer == NULL) {
3875 rc = -ENOMEM;
3876 goto nvram_write_end;
3877 }
3878 }
3879
Michael Chanb6016b72005-05-26 13:03:09 -07003880 written = 0;
3881 while ((written < len32) && (rc == 0)) {
3882 u32 page_start, page_end, data_start, data_end;
3883 u32 addr, cmd_flags;
3884 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003885
3886 /* Find the page_start addr */
3887 page_start = offset32 + written;
3888 page_start -= (page_start % bp->flash_info->page_size);
3889 /* Find the page_end addr */
3890 page_end = page_start + bp->flash_info->page_size;
3891 /* Find the data_start addr */
3892 data_start = (written == 0) ? offset32 : page_start;
3893 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003894 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003895 (offset32 + len32) : page_end;
3896
3897 /* Request access to the flash interface. */
3898 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3899 goto nvram_write_end;
3900
3901 /* Enable access to flash interface */
3902 bnx2_enable_nvram_access(bp);
3903
3904 cmd_flags = BNX2_NVM_COMMAND_FIRST;
Michael Chane30372c2007-07-16 18:26:23 -07003905 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003906 int j;
3907
3908 /* Read the whole page into the buffer
3909			 * (non-buffered flash only) */
3910 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3911 if (j == (bp->flash_info->page_size - 4)) {
3912 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3913 }
3914 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003915 page_start + j,
3916 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003917 cmd_flags);
3918
3919 if (rc)
3920 goto nvram_write_end;
3921
3922 cmd_flags = 0;
3923 }
3924 }
3925
3926 /* Enable writes to flash interface (unlock write-protect) */
3927 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3928 goto nvram_write_end;
3929
Michael Chanb6016b72005-05-26 13:03:09 -07003930 /* Loop to write back the buffer data from page_start to
3931 * data_start */
3932 i = 0;
Michael Chane30372c2007-07-16 18:26:23 -07003933 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanc8738792007-03-30 14:53:06 -07003934 /* Erase the page */
3935 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3936 goto nvram_write_end;
3937
3938 /* Re-enable the write again for the actual write */
3939 bnx2_enable_nvram_write(bp);
3940
Michael Chanb6016b72005-05-26 13:03:09 -07003941 for (addr = page_start; addr < data_start;
3942 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003943
Michael Chanb6016b72005-05-26 13:03:09 -07003944 rc = bnx2_nvram_write_dword(bp, addr,
3945 &flash_buffer[i], cmd_flags);
3946
3947 if (rc != 0)
3948 goto nvram_write_end;
3949
3950 cmd_flags = 0;
3951 }
3952 }
3953
3954 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003955 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003956 if ((addr == page_end - 4) ||
Michael Chane30372c2007-07-16 18:26:23 -07003957 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
Michael Chanb6016b72005-05-26 13:03:09 -07003958 (addr == data_end - 4))) {
3959
3960 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3961 }
3962 rc = bnx2_nvram_write_dword(bp, addr, buf,
3963 cmd_flags);
3964
3965 if (rc != 0)
3966 goto nvram_write_end;
3967
3968 cmd_flags = 0;
3969 buf += 4;
3970 }
3971
3972 /* Loop to write back the buffer data from data_end
3973 * to page_end */
Michael Chane30372c2007-07-16 18:26:23 -07003974 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003975 for (addr = data_end; addr < page_end;
3976 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003977
Michael Chanb6016b72005-05-26 13:03:09 -07003978 if (addr == page_end-4) {
3979 cmd_flags = BNX2_NVM_COMMAND_LAST;
3980 }
3981 rc = bnx2_nvram_write_dword(bp, addr,
3982 &flash_buffer[i], cmd_flags);
3983
3984 if (rc != 0)
3985 goto nvram_write_end;
3986
3987 cmd_flags = 0;
3988 }
3989 }
3990
3991 /* Disable writes to flash interface (lock write-protect) */
3992 bnx2_disable_nvram_write(bp);
3993
3994 /* Disable access to flash interface */
3995 bnx2_disable_nvram_access(bp);
3996 bnx2_release_nvram_lock(bp);
3997
3998 /* Increment written */
3999 written += data_end - data_start;
4000 }
4001
4002nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08004003 kfree(flash_buffer);
4004 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07004005 return rc;
4006}
4007
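/*
 * Illustrative sketch, not part of the driver: the outer loop of
 * bnx2_nvram_write() above walks the request one flash page at a time and
 * works out which slice of each page carries new caller data (the rest of
 * the page is re-written from the saved flash_buffer).  This reproduces
 * only that boundary arithmetic, for a hypothetical 256-byte page.
 */
#include <stdio.h>

int main(void)
{
	const unsigned page_size = 256;		/* example page size */
	unsigned offset = 200, len = 200;	/* request spans two pages */
	unsigned written = 0;

	while (written < len) {
		unsigned page_start = offset + written;
		unsigned page_end, data_start, data_end;

		page_start -= page_start % page_size;
		page_end = page_start + page_size;
		data_start = (written == 0) ? offset : page_start;
		data_end = (page_end > offset + len) ? offset + len : page_end;

		printf("page [%u,%u)  new data [%u,%u)\n",
		       page_start, page_end, data_start, data_end);
		written += data_end - data_start;
	}
	/*
	 * Prints: page [0,256)    new data [200,256)
	 *         page [256,512)  new data [256,400)
	 */
	return 0;
}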
Michael Chan0d8a6572007-07-07 22:49:43 -07004008static void
4009bnx2_init_remote_phy(struct bnx2 *bp)
4010{
4011 u32 val;
4012
4013 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4014 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4015 return;
4016
4017 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4018 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4019 return;
4020
4021 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
Michael Chan0d8a6572007-07-07 22:49:43 -07004022 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4023
4024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4025 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4026 bp->phy_port = PORT_FIBRE;
4027 else
4028 bp->phy_port = PORT_TP;
Michael Chan489310a2007-10-10 16:16:31 -07004029
4030 if (netif_running(bp->dev)) {
4031 u32 sig;
4032
4033 if (val & BNX2_LINK_STATUS_LINK_UP) {
4034 bp->link_up = 1;
4035 netif_carrier_on(bp->dev);
4036 } else {
4037 bp->link_up = 0;
4038 netif_carrier_off(bp->dev);
4039 }
4040 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4041 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4042 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4043 sig);
4044 }
Michael Chan0d8a6572007-07-07 22:49:43 -07004045 }
4046}
4047
Michael Chanb6016b72005-05-26 13:03:09 -07004048static int
4049bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4050{
4051 u32 val;
4052 int i, rc = 0;
Michael Chan489310a2007-10-10 16:16:31 -07004053 u8 old_port;
Michael Chanb6016b72005-05-26 13:03:09 -07004054
4055 /* Wait for the current PCI transaction to complete before
4056 * issuing a reset. */
4057 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4058 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4059 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4060 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4061 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4062 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4063 udelay(5);
4064
Michael Chanb090ae22006-01-23 16:07:10 -08004065 /* Wait for the firmware to tell us it is ok to issue a reset. */
4066 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4067
Michael Chanb6016b72005-05-26 13:03:09 -07004068 /* Deposit a driver reset signature so the firmware knows that
4069 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08004070 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07004071 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4072
Michael Chanb6016b72005-05-26 13:03:09 -07004073	/* Do a dummy read to force the chip to complete all current transactions
4074 * before we issue a reset. */
4075 val = REG_RD(bp, BNX2_MISC_ID);
4076
Michael Chan234754d2006-11-19 14:11:41 -08004077 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4078 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4079 REG_RD(bp, BNX2_MISC_COMMAND);
4080 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07004081
Michael Chan234754d2006-11-19 14:11:41 -08004082 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4083 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07004084
Michael Chan234754d2006-11-19 14:11:41 -08004085 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004086
Michael Chan234754d2006-11-19 14:11:41 -08004087 } else {
4088 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4089 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4090 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4091
4092 /* Chip reset. */
4093 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4094
Michael Chan594a9df2007-08-28 15:39:42 -07004095 /* Reading back any register after chip reset will hang the
4096 * bus on 5706 A0 and A1. The msleep below provides plenty
4097 * of margin for write posting.
4098 */
Michael Chan234754d2006-11-19 14:11:41 -08004099 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
Arjan van de Ven8e545882007-08-28 14:34:43 -07004100 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4101 msleep(20);
Michael Chanb6016b72005-05-26 13:03:09 -07004102
Michael Chan234754d2006-11-19 14:11:41 -08004103		/* Reset takes approximately 30 usec */
4104 for (i = 0; i < 10; i++) {
4105 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4106 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4107 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4108 break;
4109 udelay(10);
4110 }
4111
4112 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4113 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4114 printk(KERN_ERR PFX "Chip reset did not complete\n");
4115 return -EBUSY;
4116 }
Michael Chanb6016b72005-05-26 13:03:09 -07004117 }
4118
4119 /* Make sure byte swapping is properly configured. */
4120 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4121 if (val != 0x01020304) {
4122 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4123 return -ENODEV;
4124 }
4125
Michael Chanb6016b72005-05-26 13:03:09 -07004126 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08004127 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4128 if (rc)
4129 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004130
Michael Chan0d8a6572007-07-07 22:49:43 -07004131 spin_lock_bh(&bp->phy_lock);
Michael Chan489310a2007-10-10 16:16:31 -07004132 old_port = bp->phy_port;
Michael Chan0d8a6572007-07-07 22:49:43 -07004133 bnx2_init_remote_phy(bp);
Michael Chan489310a2007-10-10 16:16:31 -07004134 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
Michael Chan0d8a6572007-07-07 22:49:43 -07004135 bnx2_set_default_remote_link(bp);
4136 spin_unlock_bh(&bp->phy_lock);
4137
Michael Chanb6016b72005-05-26 13:03:09 -07004138 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4139		/* Adjust the voltage regulator to two steps lower. The default
4140 * of this register is 0x0000000e. */
4141 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4142
4143 /* Remove bad rbuf memory from the free pool. */
4144 rc = bnx2_alloc_bad_rbuf(bp);
4145 }
4146
4147 return rc;
4148}
4149
4150static int
4151bnx2_init_chip(struct bnx2 *bp)
4152{
4153 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08004154 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004155
4156 /* Make sure the interrupt is not active. */
4157 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4158
4159 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4160 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4161#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004162 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004163#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004164 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004165 DMA_READ_CHANS << 12 |
4166 DMA_WRITE_CHANS << 16;
4167
4168 val |= (0x2 << 20) | (1 << 11);
4169
Michael Chandda1e392006-01-23 16:08:14 -08004170 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07004171 val |= (1 << 23);
4172
4173 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4174 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4175 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4176
4177 REG_WR(bp, BNX2_DMA_CONFIG, val);
4178
4179 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4180 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4181 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4182 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4183 }
4184
4185 if (bp->flags & PCIX_FLAG) {
4186 u16 val16;
4187
4188 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4189 &val16);
4190 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4191 val16 & ~PCI_X_CMD_ERO);
4192 }
4193
4194 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4195 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4196 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4197 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4198
4199 /* Initialize context mapping and zero out the quick contexts. The
4200 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07004201 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4202 rc = bnx2_init_5709_context(bp);
4203 if (rc)
4204 return rc;
4205 } else
Michael Chan59b47d82006-11-19 14:10:45 -08004206 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004207
Michael Chanfba9fe92006-06-12 22:21:25 -07004208 if ((rc = bnx2_init_cpus(bp)) != 0)
4209 return rc;
4210
Michael Chanb6016b72005-05-26 13:03:09 -07004211 bnx2_init_nvram(bp);
4212
4213 bnx2_set_mac_addr(bp);
4214
4215 val = REG_RD(bp, BNX2_MQ_CONFIG);
4216 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4217 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004218 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4219 val |= BNX2_MQ_CONFIG_HALT_DIS;
4220
Michael Chanb6016b72005-05-26 13:03:09 -07004221 REG_WR(bp, BNX2_MQ_CONFIG, val);
4222
4223 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4224 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4225 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4226
4227 val = (BCM_PAGE_BITS - 8) << 24;
4228 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4229
4230 /* Configure page size. */
4231 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4232 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4233 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4234 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4235
4236 val = bp->mac_addr[0] +
4237 (bp->mac_addr[1] << 8) +
4238 (bp->mac_addr[2] << 16) +
4239 bp->mac_addr[3] +
4240 (bp->mac_addr[4] << 8) +
4241 (bp->mac_addr[5] << 16);
4242 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4243
4244 /* Program the MTU. Also include 4 bytes for CRC32. */
4245 val = bp->dev->mtu + ETH_HLEN + 4;
4246 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4247 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4248 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4249
4250 bp->last_status_idx = 0;
4251 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4252
4253 /* Set up how to generate a link change interrupt. */
4254 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4255
4256 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4257 (u64) bp->status_blk_mapping & 0xffffffff);
4258 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4259
4260 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4261 (u64) bp->stats_blk_mapping & 0xffffffff);
4262 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4263 (u64) bp->stats_blk_mapping >> 32);
4264
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004265 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004266 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4267
4268 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4269 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4270
4271 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4272 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4273
4274 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4275
4276 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4277
4278 REG_WR(bp, BNX2_HC_COM_TICKS,
4279 (bp->com_ticks_int << 16) | bp->com_ticks);
4280
4281 REG_WR(bp, BNX2_HC_CMD_TICKS,
4282 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4283
Michael Chan02537b062007-06-04 21:24:07 -07004284 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4285 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4286 else
Michael Chan7ea69202007-07-16 18:27:10 -07004287 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
Michael Chanb6016b72005-05-26 13:03:09 -07004288 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4289
4290 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004291 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004292 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004293 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4294 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004295 }
4296
Michael Chan8e6a72c2007-05-03 13:24:48 -07004297 if (bp->flags & ONE_SHOT_MSI_FLAG)
4298 val |= BNX2_HC_CONFIG_ONE_SHOT;
4299
4300 REG_WR(bp, BNX2_HC_CONFIG, val);
4301
Michael Chanb6016b72005-05-26 13:03:09 -07004302 /* Clear internal stats counters. */
4303 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4304
Michael Chanda3e4fb2007-05-03 13:24:23 -07004305 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004306
4307 /* Initialize the receive filter. */
4308 bnx2_set_rx_mode(bp->dev);
4309
Michael Chan0aa38df2007-06-04 21:23:06 -07004310 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4311 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4312 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4313 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4314 }
Michael Chanb090ae22006-01-23 16:07:10 -08004315 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4316 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004317
Michael Chandf149d72007-07-07 22:51:36 -07004318 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
Michael Chanb6016b72005-05-26 13:03:09 -07004319 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4320
4321 udelay(20);
4322
Michael Chanbf5295b2006-03-23 01:11:56 -08004323 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4324
Michael Chanb090ae22006-01-23 16:07:10 -08004325 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004326}
4327
Michael Chan59b47d82006-11-19 14:10:45 -08004328static void
4329bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4330{
4331 u32 val, offset0, offset1, offset2, offset3;
4332
4333 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4334 offset0 = BNX2_L2CTX_TYPE_XI;
4335 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4336 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4337 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4338 } else {
4339 offset0 = BNX2_L2CTX_TYPE;
4340 offset1 = BNX2_L2CTX_CMD_TYPE;
4341 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4342 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4343 }
4344 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4345 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4346
4347 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4348 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4349
4350 val = (u64) bp->tx_desc_mapping >> 32;
4351 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4352
4353 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4354 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4355}
Michael Chanb6016b72005-05-26 13:03:09 -07004356
4357static void
4358bnx2_init_tx_ring(struct bnx2 *bp)
4359{
4360 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004361 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07004362
Michael Chan2f8af122006-08-15 01:39:10 -07004363 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4364
Michael Chanb6016b72005-05-26 13:03:09 -07004365 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004366
Michael Chanb6016b72005-05-26 13:03:09 -07004367 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4368 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4369
4370 bp->tx_prod = 0;
4371 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004372 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004373 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004374
Michael Chan59b47d82006-11-19 14:10:45 -08004375 cid = TX_CID;
4376 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4377 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004378
Michael Chan59b47d82006-11-19 14:10:45 -08004379 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004380}
4381
4382static void
Michael Chan5d5d0012007-12-12 11:17:43 -08004383bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4384 int num_rings)
Michael Chanb6016b72005-05-26 13:03:09 -07004385{
Michael Chanb6016b72005-05-26 13:03:09 -07004386 int i;
Michael Chan5d5d0012007-12-12 11:17:43 -08004387 struct rx_bd *rxbd;
Michael Chanb6016b72005-05-26 13:03:09 -07004388
Michael Chan5d5d0012007-12-12 11:17:43 -08004389 for (i = 0; i < num_rings; i++) {
Michael Chan13daffa2006-03-20 17:49:20 -08004390 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004391
Michael Chan5d5d0012007-12-12 11:17:43 -08004392 rxbd = &rx_ring[i][0];
Michael Chan13daffa2006-03-20 17:49:20 -08004393 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
Michael Chan5d5d0012007-12-12 11:17:43 -08004394 rxbd->rx_bd_len = buf_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004395 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4396 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004397 if (i == (num_rings - 1))
Michael Chan13daffa2006-03-20 17:49:20 -08004398 j = 0;
4399 else
4400 j = i + 1;
Michael Chan5d5d0012007-12-12 11:17:43 -08004401 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4402 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
Michael Chan13daffa2006-03-20 17:49:20 -08004403 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004404}
4405
4406static void
4407bnx2_init_rx_ring(struct bnx2 *bp)
4408{
4409 int i;
4410 u16 prod, ring_prod;
4411 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4412
4413 bp->rx_prod = 0;
4414 bp->rx_cons = 0;
4415 bp->rx_prod_bseq = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08004416 bp->rx_pg_prod = 0;
4417 bp->rx_pg_cons = 0;
Michael Chan5d5d0012007-12-12 11:17:43 -08004418
4419 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4420 bp->rx_buf_use_size, bp->rx_max_ring);
4421
4422 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
Michael Chan47bf4242007-12-12 11:19:12 -08004423 if (bp->rx_pg_ring_size) {
4424 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4425 bp->rx_pg_desc_mapping,
4426 PAGE_SIZE, bp->rx_max_pg_ring);
4427 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4428 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4429 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4430 BNX2_L2CTX_RBDC_JUMBO_KEY);
4431
4432 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4433 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4434
4435 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4436 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4437
4438 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4439 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4440 }
Michael Chanb6016b72005-05-26 13:03:09 -07004441
4442 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4443 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4444 val |= 0x02 << 8;
Michael Chan5d5d0012007-12-12 11:17:43 -08004445 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004446
Michael Chan13daffa2006-03-20 17:49:20 -08004447 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chan5d5d0012007-12-12 11:17:43 -08004448 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004449
Michael Chan13daffa2006-03-20 17:49:20 -08004450 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chan5d5d0012007-12-12 11:17:43 -08004451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004452
Michael Chan47bf4242007-12-12 11:19:12 -08004453 ring_prod = prod = bp->rx_pg_prod;
4454 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4455 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4456 break;
4457 prod = NEXT_RX_BD(prod);
4458 ring_prod = RX_PG_RING_IDX(prod);
4459 }
4460 bp->rx_pg_prod = prod;
4461
Michael Chan5d5d0012007-12-12 11:17:43 -08004462 ring_prod = prod = bp->rx_prod;
Michael Chan236b6392006-03-20 17:49:02 -08004463 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004464 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4465 break;
4466 }
4467 prod = NEXT_RX_BD(prod);
4468 ring_prod = RX_RING_IDX(prod);
4469 }
4470 bp->rx_prod = prod;
4471
Michael Chan47bf4242007-12-12 11:19:12 -08004472 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
Michael Chanb6016b72005-05-26 13:03:09 -07004473 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4474
4475 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4476}
4477
Michael Chan5d5d0012007-12-12 11:17:43 -08004478static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
Michael Chan13daffa2006-03-20 17:49:20 -08004479{
Michael Chan5d5d0012007-12-12 11:17:43 -08004480 u32 max, num_rings = 1;
Michael Chan13daffa2006-03-20 17:49:20 -08004481
Michael Chan5d5d0012007-12-12 11:17:43 -08004482 while (ring_size > MAX_RX_DESC_CNT) {
4483 ring_size -= MAX_RX_DESC_CNT;
Michael Chan13daffa2006-03-20 17:49:20 -08004484 num_rings++;
4485 }
4486 /* round to next power of 2 */
Michael Chan5d5d0012007-12-12 11:17:43 -08004487 max = max_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004488 while ((max & num_rings) == 0)
4489 max >>= 1;
4490
4491 if (num_rings != max)
4492 max <<= 1;
4493
Michael Chan5d5d0012007-12-12 11:17:43 -08004494 return max;
4495}
4496
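/*
 * Illustrative sketch, not part of the driver: bnx2_find_max_ring() above
 * counts how many descriptor pages a requested ring size needs and rounds
 * that count up to a power of two, so ring indices can be reduced with a
 * simple mask (see the rx_max_ring_idx setup just below).
 * SKETCH_DESC_PER_PAGE stands in for MAX_RX_DESC_CNT so the copy runs in
 * user space.
 */
#include <stdio.h>

#define SKETCH_DESC_PER_PAGE	255

static unsigned find_max_ring(unsigned ring_size, unsigned max_size)
{
	unsigned max, num_rings = 1;

	while (ring_size > SKETCH_DESC_PER_PAGE) {
		ring_size -= SKETCH_DESC_PER_PAGE;
		num_rings++;
	}
	max = max_size;				/* max_size is a power of two */
	while ((max & num_rings) == 0)
		max >>= 1;
	if (num_rings != max)
		max <<= 1;			/* round up to next power of two */
	return max;
}

int main(void)
{
	/* 600 descriptors need 3 pages, rounded up to 4 BD pages. */
	printf("%u\n", find_max_ring(600, 16));		/* prints 4 */
	return 0;
}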
4497static void
4498bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4499{
Michael Chan84eaa182007-12-12 11:19:57 -08004500 u32 rx_size, rx_space, jumbo_size;
Michael Chan5d5d0012007-12-12 11:17:43 -08004501
4502 /* 8 for CRC and VLAN */
4503 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4504
Michael Chan84eaa182007-12-12 11:19:57 -08004505 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4506 sizeof(struct skb_shared_info);
4507
Michael Chan5d5d0012007-12-12 11:17:43 -08004508 bp->rx_copy_thresh = RX_COPY_THRESH;
Michael Chan47bf4242007-12-12 11:19:12 -08004509 bp->rx_pg_ring_size = 0;
4510 bp->rx_max_pg_ring = 0;
4511 bp->rx_max_pg_ring_idx = 0;
Michael Chan84eaa182007-12-12 11:19:57 -08004512 if (rx_space > PAGE_SIZE) {
4513 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4514
4515 jumbo_size = size * pages;
4516 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4517 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4518
4519 bp->rx_pg_ring_size = jumbo_size;
4520 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4521 MAX_RX_PG_RINGS);
4522 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4523 rx_size = RX_COPY_THRESH + bp->rx_offset;
4524 bp->rx_copy_thresh = 0;
4525 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004526
4527 bp->rx_buf_use_size = rx_size;
4528 /* hw alignment */
4529 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chan1db82f22007-12-12 11:19:35 -08004530 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
Michael Chan5d5d0012007-12-12 11:17:43 -08004531 bp->rx_ring_size = size;
4532 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
Michael Chan13daffa2006-03-20 17:49:20 -08004533 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4534}
4535
4536static void
Michael Chanb6016b72005-05-26 13:03:09 -07004537bnx2_free_tx_skbs(struct bnx2 *bp)
4538{
4539 int i;
4540
4541 if (bp->tx_buf_ring == NULL)
4542 return;
4543
4544 for (i = 0; i < TX_DESC_CNT; ) {
4545 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4546 struct sk_buff *skb = tx_buf->skb;
4547 int j, last;
4548
4549 if (skb == NULL) {
4550 i++;
4551 continue;
4552 }
4553
4554 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4555 skb_headlen(skb), PCI_DMA_TODEVICE);
4556
4557 tx_buf->skb = NULL;
4558
4559 last = skb_shinfo(skb)->nr_frags;
4560 for (j = 0; j < last; j++) {
4561 tx_buf = &bp->tx_buf_ring[i + j + 1];
4562 pci_unmap_page(bp->pdev,
4563 pci_unmap_addr(tx_buf, mapping),
4564 skb_shinfo(skb)->frags[j].size,
4565 PCI_DMA_TODEVICE);
4566 }
Michael Chan745720e2006-06-29 12:37:41 -07004567 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004568 i += j + 1;
4569 }
4570
4571}
4572
4573static void
4574bnx2_free_rx_skbs(struct bnx2 *bp)
4575{
4576 int i;
4577
4578 if (bp->rx_buf_ring == NULL)
4579 return;
4580
Michael Chan13daffa2006-03-20 17:49:20 -08004581 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004582 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4583 struct sk_buff *skb = rx_buf->skb;
4584
Michael Chan05d0f1c2005-11-04 08:53:48 -08004585 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004586 continue;
4587
4588 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4589 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4590
4591 rx_buf->skb = NULL;
4592
Michael Chan745720e2006-06-29 12:37:41 -07004593 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004594 }
Michael Chan47bf4242007-12-12 11:19:12 -08004595 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4596 bnx2_free_rx_page(bp, i);
Michael Chanb6016b72005-05-26 13:03:09 -07004597}
4598
4599static void
4600bnx2_free_skbs(struct bnx2 *bp)
4601{
4602 bnx2_free_tx_skbs(bp);
4603 bnx2_free_rx_skbs(bp);
4604}
4605
4606static int
4607bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4608{
4609 int rc;
4610
4611 rc = bnx2_reset_chip(bp, reset_code);
4612 bnx2_free_skbs(bp);
4613 if (rc)
4614 return rc;
4615
Michael Chanfba9fe92006-06-12 22:21:25 -07004616 if ((rc = bnx2_init_chip(bp)) != 0)
4617 return rc;
4618
Michael Chanb6016b72005-05-26 13:03:09 -07004619 bnx2_init_tx_ring(bp);
4620 bnx2_init_rx_ring(bp);
4621 return 0;
4622}
4623
4624static int
4625bnx2_init_nic(struct bnx2 *bp)
4626{
4627 int rc;
4628
4629 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4630 return rc;
4631
Michael Chan80be4432006-11-19 14:07:28 -08004632 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004633 bnx2_init_phy(bp);
4634 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004635 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004636 return 0;
4637}
4638
4639static int
4640bnx2_test_registers(struct bnx2 *bp)
4641{
4642 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004643 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004644 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004645 u16 offset;
4646 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004647#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004648 u32 rw_mask;
4649 u32 ro_mask;
4650 } reg_tbl[] = {
4651 { 0x006c, 0, 0x00000000, 0x0000003f },
4652 { 0x0090, 0, 0xffffffff, 0x00000000 },
4653 { 0x0094, 0, 0x00000000, 0x00000000 },
4654
Michael Chan5bae30c2007-05-03 13:18:46 -07004655 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4656 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4657 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4658 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4659 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4660 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4661 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4662 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4663 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004664
Michael Chan5bae30c2007-05-03 13:18:46 -07004665 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4666 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4667 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4668 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4669 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4670 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004671
Michael Chan5bae30c2007-05-03 13:18:46 -07004672 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4673 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4674 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004675
4676 { 0x1000, 0, 0x00000000, 0x00000001 },
4677 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004678
4679 { 0x1408, 0, 0x01c00800, 0x00000000 },
4680 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4681 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004682 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004683 { 0x14b0, 0, 0x00000002, 0x00000001 },
4684 { 0x14b8, 0, 0x00000000, 0x00000000 },
4685 { 0x14c0, 0, 0x00000000, 0x00000009 },
4686 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4687 { 0x14cc, 0, 0x00000000, 0x00000001 },
4688 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004689
4690 { 0x1800, 0, 0x00000000, 0x00000001 },
4691 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004692
4693 { 0x2800, 0, 0x00000000, 0x00000001 },
4694 { 0x2804, 0, 0x00000000, 0x00003f01 },
4695 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4696 { 0x2810, 0, 0xffff0000, 0x00000000 },
4697 { 0x2814, 0, 0xffff0000, 0x00000000 },
4698 { 0x2818, 0, 0xffff0000, 0x00000000 },
4699 { 0x281c, 0, 0xffff0000, 0x00000000 },
4700 { 0x2834, 0, 0xffffffff, 0x00000000 },
4701 { 0x2840, 0, 0x00000000, 0xffffffff },
4702 { 0x2844, 0, 0x00000000, 0xffffffff },
4703 { 0x2848, 0, 0xffffffff, 0x00000000 },
4704 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4705
4706 { 0x2c00, 0, 0x00000000, 0x00000011 },
4707 { 0x2c04, 0, 0x00000000, 0x00030007 },
4708
Michael Chanb6016b72005-05-26 13:03:09 -07004709 { 0x3c00, 0, 0x00000000, 0x00000001 },
4710 { 0x3c04, 0, 0x00000000, 0x00070000 },
4711 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4712 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4713 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4714 { 0x3c14, 0, 0x00000000, 0xffffffff },
4715 { 0x3c18, 0, 0x00000000, 0xffffffff },
4716 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4717 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004718
4719 { 0x5004, 0, 0x00000000, 0x0000007f },
4720 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004721
Michael Chanb6016b72005-05-26 13:03:09 -07004722 { 0x5c00, 0, 0x00000000, 0x00000001 },
4723 { 0x5c04, 0, 0x00000000, 0x0003000f },
4724 { 0x5c08, 0, 0x00000003, 0x00000000 },
4725 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4726 { 0x5c10, 0, 0x00000000, 0xffffffff },
4727 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4728 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4729 { 0x5c88, 0, 0x00000000, 0x00077373 },
4730 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4731
4732 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4733 { 0x680c, 0, 0xffffffff, 0x00000000 },
4734 { 0x6810, 0, 0xffffffff, 0x00000000 },
4735 { 0x6814, 0, 0xffffffff, 0x00000000 },
4736 { 0x6818, 0, 0xffffffff, 0x00000000 },
4737 { 0x681c, 0, 0xffffffff, 0x00000000 },
4738 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4739 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4740 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4741 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4742 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4743 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4744 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4745 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4746 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4747 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4748 { 0x684c, 0, 0xffffffff, 0x00000000 },
4749 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4750 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4751 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4752 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4753 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4754 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4755
4756 { 0xffff, 0, 0x00000000, 0x00000000 },
4757 };
4758
4759 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004760 is_5709 = 0;
4761 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4762 is_5709 = 1;
4763
Michael Chanb6016b72005-05-26 13:03:09 -07004764 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4765 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004766 u16 flags = reg_tbl[i].flags;
4767
4768 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4769 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004770
4771 offset = (u32) reg_tbl[i].offset;
4772 rw_mask = reg_tbl[i].rw_mask;
4773 ro_mask = reg_tbl[i].ro_mask;
4774
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004775 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004776
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004777 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004778
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004779 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004780 if ((val & rw_mask) != 0) {
4781 goto reg_test_err;
4782 }
4783
4784 if ((val & ro_mask) != (save_val & ro_mask)) {
4785 goto reg_test_err;
4786 }
4787
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004788 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004789
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004790 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004791 if ((val & rw_mask) != rw_mask) {
4792 goto reg_test_err;
4793 }
4794
4795 if ((val & ro_mask) != (save_val & ro_mask)) {
4796 goto reg_test_err;
4797 }
4798
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004799 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004800 continue;
4801
4802reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004803 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004804 ret = -ENODEV;
4805 break;
4806 }
4807 return ret;
4808}
4809
4810static int
4811bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4812{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004813 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004814 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4815 int i;
4816
4817 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4818 u32 offset;
4819
4820 for (offset = 0; offset < size; offset += 4) {
4821
4822 REG_WR_IND(bp, start + offset, test_pattern[i]);
4823
4824 if (REG_RD_IND(bp, start + offset) !=
4825 test_pattern[i]) {
4826 return -ENODEV;
4827 }
4828 }
4829 }
4830 return 0;
4831}
4832
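/* Internal memory self-test: walk a per-chip table of on-chip memory
 * regions (the 5709 table omits the 0x160000 block) and verify each one
 * with bnx2_do_mem_test(), which writes the test patterns through the
 * indirect register interface and reads them back.
 */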
4833static int
4834bnx2_test_memory(struct bnx2 *bp)
4835{
4836 int ret = 0;
4837 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004838 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004839 u32 offset;
4840 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004841 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004842 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004843 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004844 { 0xe0000, 0x4000 },
4845 { 0x120000, 0x4000 },
4846 { 0x1a0000, 0x4000 },
4847 { 0x160000, 0x4000 },
4848 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004849 },
4850 mem_tbl_5709[] = {
4851 { 0x60000, 0x4000 },
4852 { 0xa0000, 0x3000 },
4853 { 0xe0000, 0x4000 },
4854 { 0x120000, 0x4000 },
4855 { 0x1a0000, 0x4000 },
4856 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004857 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004858 struct mem_entry *mem_tbl;
4859
4860 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4861 mem_tbl = mem_tbl_5709;
4862 else
4863 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004864
4865 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4866 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4867 mem_tbl[i].len)) != 0) {
4868 return ret;
4869 }
4870 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004871
Michael Chanb6016b72005-05-26 13:03:09 -07004872 return ret;
4873}
4874
Michael Chanbc5a0692006-01-23 16:13:22 -08004875#define BNX2_MAC_LOOPBACK 0
4876#define BNX2_PHY_LOOPBACK 1
4877
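/* Single-packet loopback test.  Build one frame addressed to our own MAC,
 * post it on the tx ring and force a coalescing pass with
 * BNX2_HC_COMMAND_COAL_NOW_WO_INT; the test passes only if exactly one
 * packet comes back on the rx ring with no l2_fhdr error bits, the
 * expected length and an intact payload.  PHY loopback is skipped when
 * the link is owned by a remote PHY (REMOTE_PHY_CAP_FLAG).
 */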
Michael Chanb6016b72005-05-26 13:03:09 -07004878static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004879bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004880{
4881 unsigned int pkt_size, num_pkts, i;
4882 struct sk_buff *skb, *rx_skb;
4883 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004884 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004885 dma_addr_t map;
4886 struct tx_bd *txbd;
4887 struct sw_bd *rx_buf;
4888 struct l2_fhdr *rx_hdr;
4889 int ret = -ENODEV;
4890
Michael Chanbc5a0692006-01-23 16:13:22 -08004891 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4892 bp->loopback = MAC_LOOPBACK;
4893 bnx2_set_mac_loopback(bp);
4894 }
4895 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan489310a2007-10-10 16:16:31 -07004896 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4897 return 0;
4898
Michael Chan80be4432006-11-19 14:07:28 -08004899 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004900 bnx2_set_phy_loopback(bp);
4901 }
4902 else
4903 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004904
Michael Chan84eaa182007-12-12 11:19:57 -08004905 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
Michael Chan932f3772006-08-15 01:39:36 -07004906 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004907 if (!skb)
4908 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004909 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004910 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004911 memset(packet + 6, 0x0, 8);
4912 for (i = 14; i < pkt_size; i++)
4913 packet[i] = (unsigned char) (i & 0xff);
4914
4915 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4916 PCI_DMA_TODEVICE);
4917
Michael Chanbf5295b2006-03-23 01:11:56 -08004918 REG_WR(bp, BNX2_HC_COMMAND,
4919 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4920
Michael Chanb6016b72005-05-26 13:03:09 -07004921 REG_RD(bp, BNX2_HC_COMMAND);
4922
4923 udelay(5);
Michael Chanead72702007-12-20 19:55:39 -08004924 rx_start_idx = bnx2_get_hw_rx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004925
Michael Chanb6016b72005-05-26 13:03:09 -07004926 num_pkts = 0;
4927
Michael Chanbc5a0692006-01-23 16:13:22 -08004928 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004929
4930 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4931 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4932 txbd->tx_bd_mss_nbytes = pkt_size;
4933 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4934
4935 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004936 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4937 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004938
Michael Chan234754d2006-11-19 14:11:41 -08004939 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4940 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004941
4942 udelay(100);
4943
Michael Chanbf5295b2006-03-23 01:11:56 -08004944 REG_WR(bp, BNX2_HC_COMMAND,
4945 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4946
Michael Chanb6016b72005-05-26 13:03:09 -07004947 REG_RD(bp, BNX2_HC_COMMAND);
4948
4949 udelay(5);
4950
4951 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004952 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004953
Michael Chanead72702007-12-20 19:55:39 -08004954 if (bnx2_get_hw_tx_cons(bp) != bp->tx_prod)
Michael Chanb6016b72005-05-26 13:03:09 -07004955 goto loopback_test_done;
Michael Chanb6016b72005-05-26 13:03:09 -07004956
Michael Chanead72702007-12-20 19:55:39 -08004957 rx_idx = bnx2_get_hw_rx_cons(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004958 if (rx_idx != rx_start_idx + num_pkts) {
4959 goto loopback_test_done;
4960 }
4961
4962 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4963 rx_skb = rx_buf->skb;
4964
4965 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4966 skb_reserve(rx_skb, bp->rx_offset);
4967
4968 pci_dma_sync_single_for_cpu(bp->pdev,
4969 pci_unmap_addr(rx_buf, mapping),
4970 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4971
Michael Chanade2bfe2006-01-23 16:09:51 -08004972 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004973 (L2_FHDR_ERRORS_BAD_CRC |
4974 L2_FHDR_ERRORS_PHY_DECODE |
4975 L2_FHDR_ERRORS_ALIGNMENT |
4976 L2_FHDR_ERRORS_TOO_SHORT |
4977 L2_FHDR_ERRORS_GIANT_FRAME)) {
4978
4979 goto loopback_test_done;
4980 }
4981
4982 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4983 goto loopback_test_done;
4984 }
4985
4986 for (i = 14; i < pkt_size; i++) {
4987 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4988 goto loopback_test_done;
4989 }
4990 }
4991
4992 ret = 0;
4993
4994loopback_test_done:
4995 bp->loopback = 0;
4996 return ret;
4997}
4998
Michael Chanbc5a0692006-01-23 16:13:22 -08004999#define BNX2_MAC_LOOPBACK_FAILED 1
5000#define BNX2_PHY_LOOPBACK_FAILED 2
5001#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5002 BNX2_PHY_LOOPBACK_FAILED)
5003
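/* Run both loopback modes after a fresh chip reset and PHY init and
 * return a bitmask of BNX2_MAC_LOOPBACK_FAILED / BNX2_PHY_LOOPBACK_FAILED,
 * or BNX2_LOOPBACK_FAILED outright if the device is not running.
 */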
5004static int
5005bnx2_test_loopback(struct bnx2 *bp)
5006{
5007 int rc = 0;
5008
5009 if (!netif_running(bp->dev))
5010 return BNX2_LOOPBACK_FAILED;
5011
5012 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5013 spin_lock_bh(&bp->phy_lock);
5014 bnx2_init_phy(bp);
5015 spin_unlock_bh(&bp->phy_lock);
5016 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5017 rc |= BNX2_MAC_LOOPBACK_FAILED;
5018 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5019 rc |= BNX2_PHY_LOOPBACK_FAILED;
5020 return rc;
5021}
5022
Michael Chanb6016b72005-05-26 13:03:09 -07005023#define NVRAM_SIZE 0x200
5024#define CRC32_RESIDUAL 0xdebb20e3
5025
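/* NVRAM self-test: check the 0x669955aa magic in the first word, then
 * read 0x200 bytes starting at offset 0x100 and verify that each 256-byte
 * half ends in the standard CRC32 residual.
 */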
5026static int
5027bnx2_test_nvram(struct bnx2 *bp)
5028{
5029 u32 buf[NVRAM_SIZE / 4];
5030 u8 *data = (u8 *) buf;
5031 int rc = 0;
5032 u32 magic, csum;
5033
5034 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5035 goto test_nvram_done;
5036
5037 magic = be32_to_cpu(buf[0]);
5038 if (magic != 0x669955aa) {
5039 rc = -ENODEV;
5040 goto test_nvram_done;
5041 }
5042
5043 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5044 goto test_nvram_done;
5045
5046 csum = ether_crc_le(0x100, data);
5047 if (csum != CRC32_RESIDUAL) {
5048 rc = -ENODEV;
5049 goto test_nvram_done;
5050 }
5051
5052 csum = ether_crc_le(0x100, data + 0x100);
5053 if (csum != CRC32_RESIDUAL) {
5054 rc = -ENODEV;
5055 }
5056
5057test_nvram_done:
5058 return rc;
5059}
5060
5061static int
5062bnx2_test_link(struct bnx2 *bp)
5063{
5064 u32 bmsr;
5065
Michael Chan489310a2007-10-10 16:16:31 -07005066 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5067 if (bp->link_up)
5068 return 0;
5069 return -ENODEV;
5070 }
Michael Chanc770a652005-08-25 15:38:39 -07005071 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07005072 bnx2_enable_bmsr1(bp);
5073 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5074 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5075 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07005076 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005077
Michael Chanb6016b72005-05-26 13:03:09 -07005078 if (bmsr & BMSR_LSTATUS) {
5079 return 0;
5080 }
5081 return -ENODEV;
5082}
5083
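/* Interrupt self-test, used by bnx2_open() right after enabling MSI.
 * Snapshot the status block index from BNX2_PCICFG_INT_ACK_CMD, force an
 * immediate coalescing interrupt, then poll for up to ~100ms for the
 * index to move; if it never does, interrupts are not being delivered and
 * the caller falls back to INTx.
 */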
5084static int
5085bnx2_test_intr(struct bnx2 *bp)
5086{
5087 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07005088 u16 status_idx;
5089
5090 if (!netif_running(bp->dev))
5091 return -ENODEV;
5092
5093 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5094
5095 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08005096 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07005097 REG_RD(bp, BNX2_HC_COMMAND);
5098
5099 for (i = 0; i < 10; i++) {
5100 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5101 status_idx) {
5102
5103 break;
5104 }
5105
5106 msleep_interruptible(10);
5107 }
5108 if (i < 10)
5109 return 0;
5110
5111 return -ENODEV;
5112}
5113
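/* Periodic SerDes state machine for the 5706.  While autoneg has not
 * brought the link up, sample signal-detect and the received config word
 * through the 0x1c/0x17/0x15 shadow registers: a signal with no config
 * word most likely means a forced-speed link partner, so fall back to
 * forced 1000/full (parallel detect).  Once a config word is seen again
 * while in parallel detect, autoneg is re-enabled.
 */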
5114static void
Michael Chan48b01e22006-11-19 14:08:00 -08005115bnx2_5706_serdes_timer(struct bnx2 *bp)
5116{
5117 spin_lock(&bp->phy_lock);
5118 if (bp->serdes_an_pending)
5119 bp->serdes_an_pending--;
5120 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5121 u32 bmcr;
5122
5123 bp->current_interval = bp->timer_interval;
5124
Michael Chanca58c3a2007-05-03 13:22:52 -07005125 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005126
5127 if (bmcr & BMCR_ANENABLE) {
5128 u32 phy1, phy2;
5129
5130 bnx2_write_phy(bp, 0x1c, 0x7c00);
5131 bnx2_read_phy(bp, 0x1c, &phy1);
5132
5133 bnx2_write_phy(bp, 0x17, 0x0f01);
5134 bnx2_read_phy(bp, 0x15, &phy2);
5135 bnx2_write_phy(bp, 0x17, 0x0f01);
5136 bnx2_read_phy(bp, 0x15, &phy2);
5137
5138 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5139 !(phy2 & 0x20)) { /* no CONFIG */
5140
5141 bmcr &= ~BMCR_ANENABLE;
5142 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07005143 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005144 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5145 }
5146 }
5147 }
5148 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5149 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5150 u32 phy2;
5151
5152 bnx2_write_phy(bp, 0x17, 0x0f01);
5153 bnx2_read_phy(bp, 0x15, &phy2);
5154 if (phy2 & 0x20) {
5155 u32 bmcr;
5156
Michael Chanca58c3a2007-05-03 13:22:52 -07005157 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005158 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07005159 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005160
5161 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5162 }
5163 } else
5164 bp->current_interval = bp->timer_interval;
5165
5166 spin_unlock(&bp->phy_lock);
5167}
5168
5169static void
Michael Chanf8dd0642006-11-19 14:08:29 -08005170bnx2_5708_serdes_timer(struct bnx2 *bp)
5171{
Michael Chan0d8a6572007-07-07 22:49:43 -07005172 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5173 return;
5174
Michael Chanf8dd0642006-11-19 14:08:29 -08005175 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5176 bp->serdes_an_pending = 0;
5177 return;
5178 }
5179
5180 spin_lock(&bp->phy_lock);
5181 if (bp->serdes_an_pending)
5182 bp->serdes_an_pending--;
5183 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5184 u32 bmcr;
5185
Michael Chanca58c3a2007-05-03 13:22:52 -07005186 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08005187 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07005188 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005189 bp->current_interval = SERDES_FORCED_TIMEOUT;
5190 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07005191 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005192 bp->serdes_an_pending = 2;
5193 bp->current_interval = bp->timer_interval;
5194 }
5195
5196 } else
5197 bp->current_interval = bp->timer_interval;
5198
5199 spin_unlock(&bp->phy_lock);
5200}
5201
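/* Per-device timer: send the driver heartbeat to the firmware, pick up
 * the firmware rx-drop counter, kick a statistics DMA on the 5708 (see
 * the workaround below), run the SerDes state machine on SerDes ports,
 * and re-arm itself at bp->current_interval.
 */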
5202static void
Michael Chanb6016b72005-05-26 13:03:09 -07005203bnx2_timer(unsigned long data)
5204{
5205 struct bnx2 *bp = (struct bnx2 *) data;
Michael Chanb6016b72005-05-26 13:03:09 -07005206
Michael Chancd339a02005-08-25 15:35:24 -07005207 if (!netif_running(bp->dev))
5208 return;
5209
Michael Chanb6016b72005-05-26 13:03:09 -07005210 if (atomic_read(&bp->intr_sem) != 0)
5211 goto bnx2_restart_timer;
5212
Michael Chandf149d72007-07-07 22:51:36 -07005213 bnx2_send_heart_beat(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005214
Michael Chancea94db2006-06-12 22:16:13 -07005215 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5216
Michael Chan02537b062007-06-04 21:24:07 -07005217	/* work around occasionally corrupted counters */
5218 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5219 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5220 BNX2_HC_COMMAND_STATS_NOW);
5221
Michael Chanf8dd0642006-11-19 14:08:29 -08005222 if (bp->phy_flags & PHY_SERDES_FLAG) {
5223 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5224 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07005225 else
Michael Chanf8dd0642006-11-19 14:08:29 -08005226 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005227 }
5228
5229bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07005230 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005231}
5232
Michael Chan8e6a72c2007-05-03 13:24:48 -07005233static int
5234bnx2_request_irq(struct bnx2 *bp)
5235{
5236 struct net_device *dev = bp->dev;
Michael Chan6d866ff2007-12-20 19:56:09 -08005237 unsigned long flags;
5238 struct bnx2_irq *irq = &bp->irq_tbl[0];
5239 int rc;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005240
Michael Chan6d866ff2007-12-20 19:56:09 -08005241 if (bp->flags & USING_MSI_FLAG)
5242 flags = 0;
5243 else
5244 flags = IRQF_SHARED;
5245 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005246 return rc;
5247}
5248
5249static void
5250bnx2_free_irq(struct bnx2 *bp)
5251{
5252 struct net_device *dev = bp->dev;
5253
Michael Chan6d866ff2007-12-20 19:56:09 -08005254 free_irq(bp->irq_tbl[0].vector, dev);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005255 if (bp->flags & USING_MSI_FLAG) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07005256 pci_disable_msi(bp->pdev);
5257 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
Michael Chan6d866ff2007-12-20 19:56:09 -08005258 }
5259}
5260
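/* Pick the interrupt mode before request_irq(): default to INTx with
 * bnx2_interrupt(), switch to MSI when the device advertises it and the
 * module parameter allows it, and use the one-shot MSI handler on the
 * 5709.  dis_msi lets bnx2_open() force INTx after a failed MSI test.
 */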
5261static void
5262bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5263{
5264 bp->irq_tbl[0].handler = bnx2_interrupt;
5265 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5266
5267 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5268 if (pci_enable_msi(bp->pdev) == 0) {
5269 bp->flags |= USING_MSI_FLAG;
5270 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5271 bp->flags |= ONE_SHOT_MSI_FLAG;
5272 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5273 } else
5274 bp->irq_tbl[0].handler = bnx2_msi;
5275 }
5276 }
5277
5278 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005279}
5280
Michael Chanb6016b72005-05-26 13:03:09 -07005281/* Called with rtnl_lock */
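/* Bring-up order: power the chip to D0, allocate descriptor memory, hook
 * up the interrupt, program the chip, start the timer and enable
 * interrupts.  When MSI is in use, bnx2_test_intr() sanity-checks
 * delivery and the whole sequence is redone in INTx mode if no interrupt
 * arrives.
 */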
5282static int
5283bnx2_open(struct net_device *dev)
5284{
Michael Chan972ec0d2006-01-23 16:12:43 -08005285 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005286 int rc;
5287
Michael Chan1b2f9222007-05-03 13:20:19 -07005288 netif_carrier_off(dev);
5289
Pavel Machek829ca9a2005-09-03 15:56:56 -07005290 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005291 bnx2_disable_int(bp);
5292
5293 rc = bnx2_alloc_mem(bp);
5294 if (rc)
5295 return rc;
5296
Michael Chan6d866ff2007-12-20 19:56:09 -08005297 bnx2_setup_int_mode(bp, disable_msi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005298 napi_enable(&bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005299 rc = bnx2_request_irq(bp);
5300
Michael Chanb6016b72005-05-26 13:03:09 -07005301 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005302 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005303 bnx2_free_mem(bp);
5304 return rc;
5305 }
5306
5307 rc = bnx2_init_nic(bp);
5308
5309 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005310 napi_disable(&bp->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005311 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005312 bnx2_free_skbs(bp);
5313 bnx2_free_mem(bp);
5314 return rc;
5315 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005316
Michael Chancd339a02005-08-25 15:35:24 -07005317 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005318
5319 atomic_set(&bp->intr_sem, 0);
5320
5321 bnx2_enable_int(bp);
5322
5323 if (bp->flags & USING_MSI_FLAG) {
5324 /* Test MSI to make sure it is working
5325 * If MSI test fails, go back to INTx mode
5326 */
5327 if (bnx2_test_intr(bp) != 0) {
5328 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5329 " using MSI, switching to INTx mode. Please"
5330 " report this failure to the PCI maintainer"
5331 " and include system chipset information.\n",
5332 bp->dev->name);
5333
5334 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005335 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005336
Michael Chan6d866ff2007-12-20 19:56:09 -08005337 bnx2_setup_int_mode(bp, 1);
5338
Michael Chanb6016b72005-05-26 13:03:09 -07005339 rc = bnx2_init_nic(bp);
5340
Michael Chan8e6a72c2007-05-03 13:24:48 -07005341 if (!rc)
5342 rc = bnx2_request_irq(bp);
5343
Michael Chanb6016b72005-05-26 13:03:09 -07005344 if (rc) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005345 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005346 bnx2_free_skbs(bp);
5347 bnx2_free_mem(bp);
5348 del_timer_sync(&bp->timer);
5349 return rc;
5350 }
5351 bnx2_enable_int(bp);
5352 }
5353 }
5354 if (bp->flags & USING_MSI_FLAG) {
5355 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5356 }
5357
5358 netif_start_queue(dev);
5359
5360 return 0;
5361}
5362
5363static void
David Howellsc4028952006-11-22 14:57:56 +00005364bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005365{
David Howellsc4028952006-11-22 14:57:56 +00005366 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005367
Michael Chanafdc08b2005-08-25 15:34:29 -07005368 if (!netif_running(bp->dev))
5369 return;
5370
5371 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005372 bnx2_netif_stop(bp);
5373
5374 bnx2_init_nic(bp);
5375
5376 atomic_set(&bp->intr_sem, 1);
5377 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005378 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005379}
5380
5381static void
5382bnx2_tx_timeout(struct net_device *dev)
5383{
Michael Chan972ec0d2006-01-23 16:12:43 -08005384 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005385
5386	/* This allows the netif to be shut down gracefully before resetting */
5387 schedule_work(&bp->reset_task);
5388}
5389
5390#ifdef BCM_VLAN
5391/* Called with rtnl_lock */
5392static void
5393bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5394{
Michael Chan972ec0d2006-01-23 16:12:43 -08005395 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005396
5397 bnx2_netif_stop(bp);
5398
5399 bp->vlgrp = vlgrp;
5400 bnx2_set_rx_mode(dev);
5401
5402 bnx2_netif_start(bp);
5403}
Michael Chanb6016b72005-05-26 13:03:09 -07005404#endif
5405
Herbert Xu932ff272006-06-09 12:20:56 -07005406/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005407 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5408 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005409 */
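/* For LSO packets the IPv4 path zeroes iph->check and seeds the TCP
 * checksum with the pseudo-header sum, while the TCP option length (plus
 * the transport-header offset on IPv6) is encoded into the BD flags.
 * Each fragment then gets its own tx_bd, the first flagged START and the
 * last END, before the producer index and byte sequence are written to
 * the tx doorbell registers.
 */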
5410static int
5411bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5412{
Michael Chan972ec0d2006-01-23 16:12:43 -08005413 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005414 dma_addr_t mapping;
5415 struct tx_bd *txbd;
5416 struct sw_bd *tx_buf;
5417 u32 len, vlan_tag_flags, last_frag, mss;
5418 u16 prod, ring_prod;
5419 int i;
5420
Michael Chane89bbf12005-08-25 15:36:58 -07005421 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005422 netif_stop_queue(dev);
5423 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5424 dev->name);
5425
5426 return NETDEV_TX_BUSY;
5427 }
5428 len = skb_headlen(skb);
5429 prod = bp->tx_prod;
5430 ring_prod = TX_RING_IDX(prod);
5431
5432 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005433 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005434 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5435 }
5436
5437 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5438 vlan_tag_flags |=
5439 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5440 }
Michael Chanfde82052007-05-03 17:23:35 -07005441 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005442 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005443 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005444
Michael Chanb6016b72005-05-26 13:03:09 -07005445 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5446
Michael Chan4666f872007-05-03 13:22:28 -07005447 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005448
Michael Chan4666f872007-05-03 13:22:28 -07005449 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5450 u32 tcp_off = skb_transport_offset(skb) -
5451 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005452
Michael Chan4666f872007-05-03 13:22:28 -07005453 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5454 TX_BD_FLAGS_SW_FLAGS;
5455 if (likely(tcp_off == 0))
5456 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5457 else {
5458 tcp_off >>= 3;
5459 vlan_tag_flags |= ((tcp_off & 0x3) <<
5460 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5461 ((tcp_off & 0x10) <<
5462 TX_BD_FLAGS_TCP6_OFF4_SHL);
5463 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5464 }
5465 } else {
5466 if (skb_header_cloned(skb) &&
5467 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5468 dev_kfree_skb(skb);
5469 return NETDEV_TX_OK;
5470 }
5471
5472 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5473
5474 iph = ip_hdr(skb);
5475 iph->check = 0;
5476 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5477 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5478 iph->daddr, 0,
5479 IPPROTO_TCP,
5480 0);
5481 if (tcp_opt_len || (iph->ihl > 5)) {
5482 vlan_tag_flags |= ((iph->ihl - 5) +
5483 (tcp_opt_len >> 2)) << 8;
5484 }
Michael Chanb6016b72005-05-26 13:03:09 -07005485 }
Michael Chan4666f872007-05-03 13:22:28 -07005486 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005487 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005488
5489 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005490
Michael Chanb6016b72005-05-26 13:03:09 -07005491 tx_buf = &bp->tx_buf_ring[ring_prod];
5492 tx_buf->skb = skb;
5493 pci_unmap_addr_set(tx_buf, mapping, mapping);
5494
5495 txbd = &bp->tx_desc_ring[ring_prod];
5496
5497 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5498 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5499 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5500 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5501
5502 last_frag = skb_shinfo(skb)->nr_frags;
5503
5504 for (i = 0; i < last_frag; i++) {
5505 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5506
5507 prod = NEXT_TX_BD(prod);
5508 ring_prod = TX_RING_IDX(prod);
5509 txbd = &bp->tx_desc_ring[ring_prod];
5510
5511 len = frag->size;
5512 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5513 len, PCI_DMA_TODEVICE);
5514 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5515 mapping, mapping);
5516
5517 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5518 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5519 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5520 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5521
5522 }
5523 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5524
5525 prod = NEXT_TX_BD(prod);
5526 bp->tx_prod_bseq += skb->len;
5527
Michael Chan234754d2006-11-19 14:11:41 -08005528 REG_WR16(bp, bp->tx_bidx_addr, prod);
5529 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005530
5531 mmiowb();
5532
5533 bp->tx_prod = prod;
5534 dev->trans_start = jiffies;
5535
Michael Chane89bbf12005-08-25 15:36:58 -07005536 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005537 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07005538 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005539 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005540 }
5541
5542 return NETDEV_TX_OK;
5543}
5544
5545/* Called with rtnl_lock */
5546static int
5547bnx2_close(struct net_device *dev)
5548{
Michael Chan972ec0d2006-01-23 16:12:43 -08005549 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005550 u32 reset_code;
5551
Michael Chanafdc08b2005-08-25 15:34:29 -07005552 /* Calling flush_scheduled_work() may deadlock because
5553 * linkwatch_event() may be on the workqueue and it will try to get
5554 * the rtnl_lock which we are holding.
5555 */
5556 while (bp->in_reset_task)
5557 msleep(1);
5558
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005559 bnx2_disable_int_sync(bp);
5560 napi_disable(&bp->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07005561 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005562 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005563 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005564 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005565 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5566 else
5567 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5568 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005569 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005570 bnx2_free_skbs(bp);
5571 bnx2_free_mem(bp);
5572 bp->link_up = 0;
5573 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005574 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005575 return 0;
5576}
5577
5578#define GET_NET_STATS64(ctr) \
5579 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5580 (unsigned long) (ctr##_lo)
5581
5582#define GET_NET_STATS32(ctr) \
5583 (ctr##_lo)
5584
5585#if (BITS_PER_LONG == 64)
5586#define GET_NET_STATS GET_NET_STATS64
5587#else
5588#define GET_NET_STATS GET_NET_STATS32
5589#endif
5590
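/* Fold the hardware statistics block into struct net_device_stats.  The
 * chip keeps 64-bit counters split into _hi/_lo words; GET_NET_STATS uses
 * both halves on 64-bit kernels and only the low word on 32-bit ones,
 * where an unsigned long cannot hold the full value anyway.
 */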
5591static struct net_device_stats *
5592bnx2_get_stats(struct net_device *dev)
5593{
Michael Chan972ec0d2006-01-23 16:12:43 -08005594 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005595 struct statistics_block *stats_blk = bp->stats_blk;
5596 struct net_device_stats *net_stats = &bp->net_stats;
5597
5598 if (bp->stats_blk == NULL) {
5599 return net_stats;
5600 }
5601 net_stats->rx_packets =
5602 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5603 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5604 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5605
5606 net_stats->tx_packets =
5607 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5608 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5609 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5610
5611 net_stats->rx_bytes =
5612 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5613
5614 net_stats->tx_bytes =
5615 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5616
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005617 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005618 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5619
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005620 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005621 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5622
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005623 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005624 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5625 stats_blk->stat_EtherStatsOverrsizePkts);
5626
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005627 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005628 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5629
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005630 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005631 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5632
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005633 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005634 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5635
5636 net_stats->rx_errors = net_stats->rx_length_errors +
5637 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5638 net_stats->rx_crc_errors;
5639
5640 net_stats->tx_aborted_errors =
5641 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5642 stats_blk->stat_Dot3StatsLateCollisions);
5643
Michael Chan5b0c76a2005-11-04 08:45:49 -08005644 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5645 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005646 net_stats->tx_carrier_errors = 0;
5647 else {
5648 net_stats->tx_carrier_errors =
5649 (unsigned long)
5650 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5651 }
5652
5653 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005654 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005655 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5656 +
5657 net_stats->tx_aborted_errors +
5658 net_stats->tx_carrier_errors;
5659
Michael Chancea94db2006-06-12 22:16:13 -07005660 net_stats->rx_missed_errors =
5661 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5662 stats_blk->stat_FwRxDrop);
5663
Michael Chanb6016b72005-05-26 13:03:09 -07005664 return net_stats;
5665}
5666
5667/* All ethtool functions called with rtnl_lock */
5668
5669static int
5670bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5671{
Michael Chan972ec0d2006-01-23 16:12:43 -08005672 struct bnx2 *bp = netdev_priv(dev);
Michael Chan7b6b8342007-07-07 22:50:15 -07005673 int support_serdes = 0, support_copper = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005674
5675 cmd->supported = SUPPORTED_Autoneg;
Michael Chan7b6b8342007-07-07 22:50:15 -07005676 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5677 support_serdes = 1;
5678 support_copper = 1;
5679 } else if (bp->phy_port == PORT_FIBRE)
5680 support_serdes = 1;
5681 else
5682 support_copper = 1;
5683
5684 if (support_serdes) {
Michael Chanb6016b72005-05-26 13:03:09 -07005685 cmd->supported |= SUPPORTED_1000baseT_Full |
5686 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005687 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5688 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005689
Michael Chanb6016b72005-05-26 13:03:09 -07005690 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005691 if (support_copper) {
Michael Chanb6016b72005-05-26 13:03:09 -07005692 cmd->supported |= SUPPORTED_10baseT_Half |
5693 SUPPORTED_10baseT_Full |
5694 SUPPORTED_100baseT_Half |
5695 SUPPORTED_100baseT_Full |
5696 SUPPORTED_1000baseT_Full |
5697 SUPPORTED_TP;
5698
Michael Chanb6016b72005-05-26 13:03:09 -07005699 }
5700
Michael Chan7b6b8342007-07-07 22:50:15 -07005701 spin_lock_bh(&bp->phy_lock);
5702 cmd->port = bp->phy_port;
Michael Chanb6016b72005-05-26 13:03:09 -07005703 cmd->advertising = bp->advertising;
5704
5705 if (bp->autoneg & AUTONEG_SPEED) {
5706 cmd->autoneg = AUTONEG_ENABLE;
5707 }
5708 else {
5709 cmd->autoneg = AUTONEG_DISABLE;
5710 }
5711
5712 if (netif_carrier_ok(dev)) {
5713 cmd->speed = bp->line_speed;
5714 cmd->duplex = bp->duplex;
5715 }
5716 else {
5717 cmd->speed = -1;
5718 cmd->duplex = -1;
5719 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005720 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005721
5722 cmd->transceiver = XCVR_INTERNAL;
5723 cmd->phy_address = bp->phy_addr;
5724
5725 return 0;
5726}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005727
Michael Chanb6016b72005-05-26 13:03:09 -07005728static int
5729bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5730{
Michael Chan972ec0d2006-01-23 16:12:43 -08005731 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005732 u8 autoneg = bp->autoneg;
5733 u8 req_duplex = bp->req_duplex;
5734 u16 req_line_speed = bp->req_line_speed;
5735 u32 advertising = bp->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005736 int err = -EINVAL;
5737
5738 spin_lock_bh(&bp->phy_lock);
5739
5740 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5741 goto err_out_unlock;
5742
5743 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5744 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005745
5746 if (cmd->autoneg == AUTONEG_ENABLE) {
5747 autoneg |= AUTONEG_SPEED;
5748
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005749 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005750
5751		/* allow advertising a single speed */
5752 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5753 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5754 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5755 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5756
Michael Chan7b6b8342007-07-07 22:50:15 -07005757 if (cmd->port == PORT_FIBRE)
5758 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005759
5760 advertising = cmd->advertising;
5761
Michael Chan27a005b2007-05-03 13:23:41 -07005762 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
Michael Chan7b6b8342007-07-07 22:50:15 -07005763 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5764 (cmd->port == PORT_TP))
5765 goto err_out_unlock;
5766 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07005767 advertising = cmd->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005768 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5769 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005770 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005771 if (cmd->port == PORT_FIBRE)
Michael Chanb6016b72005-05-26 13:03:09 -07005772 advertising = ETHTOOL_ALL_FIBRE_SPEED;
Michael Chan7b6b8342007-07-07 22:50:15 -07005773 else
Michael Chanb6016b72005-05-26 13:03:09 -07005774 advertising = ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005775 }
5776 advertising |= ADVERTISED_Autoneg;
5777 }
5778 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005779 if (cmd->port == PORT_FIBRE) {
Michael Chan80be4432006-11-19 14:07:28 -08005780 if ((cmd->speed != SPEED_1000 &&
5781 cmd->speed != SPEED_2500) ||
5782 (cmd->duplex != DUPLEX_FULL))
Michael Chan7b6b8342007-07-07 22:50:15 -07005783 goto err_out_unlock;
Michael Chan80be4432006-11-19 14:07:28 -08005784
5785 if (cmd->speed == SPEED_2500 &&
5786 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
Michael Chan7b6b8342007-07-07 22:50:15 -07005787 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005788 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005789 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5790 goto err_out_unlock;
5791
Michael Chanb6016b72005-05-26 13:03:09 -07005792 autoneg &= ~AUTONEG_SPEED;
5793 req_line_speed = cmd->speed;
5794 req_duplex = cmd->duplex;
5795 advertising = 0;
5796 }
5797
5798 bp->autoneg = autoneg;
5799 bp->advertising = advertising;
5800 bp->req_line_speed = req_line_speed;
5801 bp->req_duplex = req_duplex;
5802
Michael Chan7b6b8342007-07-07 22:50:15 -07005803 err = bnx2_setup_phy(bp, cmd->port);
Michael Chanb6016b72005-05-26 13:03:09 -07005804
Michael Chan7b6b8342007-07-07 22:50:15 -07005805err_out_unlock:
Michael Chanc770a652005-08-25 15:38:39 -07005806 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005807
Michael Chan7b6b8342007-07-07 22:50:15 -07005808 return err;
Michael Chanb6016b72005-05-26 13:03:09 -07005809}
5810
5811static void
5812bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5813{
Michael Chan972ec0d2006-01-23 16:12:43 -08005814 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005815
5816 strcpy(info->driver, DRV_MODULE_NAME);
5817 strcpy(info->version, DRV_MODULE_VERSION);
5818 strcpy(info->bus_info, pci_name(bp->pdev));
Michael Chan58fc2ea2007-07-07 22:52:02 -07005819 strcpy(info->fw_version, bp->fw_version);
Michael Chanb6016b72005-05-26 13:03:09 -07005820}
5821
Michael Chan244ac4f2006-03-20 17:48:46 -08005822#define BNX2_REGDUMP_LEN (32 * 1024)
5823
5824static int
5825bnx2_get_regs_len(struct net_device *dev)
5826{
5827 return BNX2_REGDUMP_LEN;
5828}
5829
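/* Register dump for "ethtool -d": reg_boundaries holds alternating
 * start/end offsets of the readable windows in the first 32K of register
 * space; everything in between is left zeroed in the output buffer.
 */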
5830static void
5831bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5832{
5833 u32 *p = _p, i, offset;
5834 u8 *orig_p = _p;
5835 struct bnx2 *bp = netdev_priv(dev);
5836 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5837 0x0800, 0x0880, 0x0c00, 0x0c10,
5838 0x0c30, 0x0d08, 0x1000, 0x101c,
5839 0x1040, 0x1048, 0x1080, 0x10a4,
5840 0x1400, 0x1490, 0x1498, 0x14f0,
5841 0x1500, 0x155c, 0x1580, 0x15dc,
5842 0x1600, 0x1658, 0x1680, 0x16d8,
5843 0x1800, 0x1820, 0x1840, 0x1854,
5844 0x1880, 0x1894, 0x1900, 0x1984,
5845 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5846 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5847 0x2000, 0x2030, 0x23c0, 0x2400,
5848 0x2800, 0x2820, 0x2830, 0x2850,
5849 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5850 0x3c00, 0x3c94, 0x4000, 0x4010,
5851 0x4080, 0x4090, 0x43c0, 0x4458,
5852 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5853 0x4fc0, 0x5010, 0x53c0, 0x5444,
5854 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5855 0x5fc0, 0x6000, 0x6400, 0x6428,
5856 0x6800, 0x6848, 0x684c, 0x6860,
5857 0x6888, 0x6910, 0x8000 };
5858
5859 regs->version = 0;
5860
5861 memset(p, 0, BNX2_REGDUMP_LEN);
5862
5863 if (!netif_running(bp->dev))
5864 return;
5865
5866 i = 0;
5867 offset = reg_boundaries[0];
5868 p += offset;
5869 while (offset < BNX2_REGDUMP_LEN) {
5870 *p++ = REG_RD(bp, offset);
5871 offset += 4;
5872 if (offset == reg_boundaries[i + 1]) {
5873 offset = reg_boundaries[i + 2];
5874 p = (u32 *) (orig_p + offset);
5875 i += 2;
5876 }
5877 }
5878}
5879
Michael Chanb6016b72005-05-26 13:03:09 -07005880static void
5881bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5882{
Michael Chan972ec0d2006-01-23 16:12:43 -08005883 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005884
5885 if (bp->flags & NO_WOL_FLAG) {
5886 wol->supported = 0;
5887 wol->wolopts = 0;
5888 }
5889 else {
5890 wol->supported = WAKE_MAGIC;
5891 if (bp->wol)
5892 wol->wolopts = WAKE_MAGIC;
5893 else
5894 wol->wolopts = 0;
5895 }
5896 memset(&wol->sopass, 0, sizeof(wol->sopass));
5897}
5898
5899static int
5900bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5901{
Michael Chan972ec0d2006-01-23 16:12:43 -08005902 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005903
5904 if (wol->wolopts & ~WAKE_MAGIC)
5905 return -EINVAL;
5906
5907 if (wol->wolopts & WAKE_MAGIC) {
5908 if (bp->flags & NO_WOL_FLAG)
5909 return -EINVAL;
5910
5911 bp->wol = 1;
5912 }
5913 else {
5914 bp->wol = 0;
5915 }
5916 return 0;
5917}
5918
5919static int
5920bnx2_nway_reset(struct net_device *dev)
5921{
Michael Chan972ec0d2006-01-23 16:12:43 -08005922 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005923 u32 bmcr;
5924
5925 if (!(bp->autoneg & AUTONEG_SPEED)) {
5926 return -EINVAL;
5927 }
5928
Michael Chanc770a652005-08-25 15:38:39 -07005929 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005930
Michael Chan7b6b8342007-07-07 22:50:15 -07005931 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5932 int rc;
5933
5934 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5935 spin_unlock_bh(&bp->phy_lock);
5936 return rc;
5937 }
5938
Michael Chanb6016b72005-05-26 13:03:09 -07005939	/* Force a link down that is visible to the link partner */
5940 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005941 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005942 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005943
5944 msleep(20);
5945
Michael Chanc770a652005-08-25 15:38:39 -07005946 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005947
5948 bp->current_interval = SERDES_AN_TIMEOUT;
5949 bp->serdes_an_pending = 1;
5950 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005951 }
5952
Michael Chanca58c3a2007-05-03 13:22:52 -07005953 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005954 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005955 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005956
Michael Chanc770a652005-08-25 15:38:39 -07005957 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005958
5959 return 0;
5960}
5961
5962static int
5963bnx2_get_eeprom_len(struct net_device *dev)
5964{
Michael Chan972ec0d2006-01-23 16:12:43 -08005965 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005966
Michael Chan1122db72006-01-23 16:11:42 -08005967 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005968 return 0;
5969
Michael Chan1122db72006-01-23 16:11:42 -08005970 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005971}
5972
5973static int
5974bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5975 u8 *eebuf)
5976{
Michael Chan972ec0d2006-01-23 16:12:43 -08005977 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005978 int rc;
5979
John W. Linville1064e942005-11-10 12:58:24 -08005980 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005981
5982 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5983
5984 return rc;
5985}
5986
5987static int
5988bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5989 u8 *eebuf)
5990{
Michael Chan972ec0d2006-01-23 16:12:43 -08005991 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005992 int rc;
5993
John W. Linville1064e942005-11-10 12:58:24 -08005994 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005995
5996 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5997
5998 return rc;
5999}
6000
6001static int
6002bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6003{
Michael Chan972ec0d2006-01-23 16:12:43 -08006004 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006005
6006 memset(coal, 0, sizeof(struct ethtool_coalesce));
6007
6008 coal->rx_coalesce_usecs = bp->rx_ticks;
6009 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6010 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6011 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6012
6013 coal->tx_coalesce_usecs = bp->tx_ticks;
6014 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6015 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6016 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6017
6018 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6019
6020 return 0;
6021}
6022
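/* "ethtool -C" handler.  Tick values are clamped to the 10-bit hardware
 * fields and frame counts to 8 bits; the 5708 only supports a statistics
 * interval of 0 or one second, so any other non-zero value is forced to
 * one second.  A running interface is restarted so the new
 * host-coalescing parameters are programmed into the chip.
 */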
6023static int
6024bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6025{
Michael Chan972ec0d2006-01-23 16:12:43 -08006026 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006027
6028 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6029 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6030
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006031 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07006032 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6033
6034 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6035 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6036
6037 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6038 if (bp->rx_quick_cons_trip_int > 0xff)
6039 bp->rx_quick_cons_trip_int = 0xff;
6040
6041 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6042 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6043
6044 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6045 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6046
6047 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6048 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6049
6050 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6051 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6052 0xff;
6053
6054 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07006055 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6056 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6057 bp->stats_ticks = USEC_PER_SEC;
6058 }
Michael Chan7ea69202007-07-16 18:27:10 -07006059 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6060 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6061 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006062
6063 if (netif_running(bp->dev)) {
6064 bnx2_netif_stop(bp);
6065 bnx2_init_nic(bp);
6066 bnx2_netif_start(bp);
6067 }
6068
6069 return 0;
6070}
6071
6072static void
6073bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6074{
Michael Chan972ec0d2006-01-23 16:12:43 -08006075 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006076
Michael Chan13daffa2006-03-20 17:49:20 -08006077 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006078 ering->rx_mini_max_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006079 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006080
6081 ering->rx_pending = bp->rx_ring_size;
6082 ering->rx_mini_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006083 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
Michael Chanb6016b72005-05-26 13:03:09 -07006084
6085 ering->tx_max_pending = MAX_TX_DESC_CNT;
6086 ering->tx_pending = bp->tx_ring_size;
6087}
6088
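/* Resizing the rings ("ethtool -G") needs a full reinit: stop traffic,
 * reset the chip, free every buffer and the descriptor memory, record the
 * new sizes, then reallocate and bring the NIC back up if it was running.
 */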
6089static int
Michael Chan5d5d0012007-12-12 11:17:43 -08006090bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
Michael Chanb6016b72005-05-26 13:03:09 -07006091{
Michael Chan13daffa2006-03-20 17:49:20 -08006092 if (netif_running(bp->dev)) {
6093 bnx2_netif_stop(bp);
6094 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6095 bnx2_free_skbs(bp);
6096 bnx2_free_mem(bp);
6097 }
6098
Michael Chan5d5d0012007-12-12 11:17:43 -08006099 bnx2_set_rx_ring_size(bp, rx);
6100 bp->tx_ring_size = tx;
Michael Chanb6016b72005-05-26 13:03:09 -07006101
6102 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08006103 int rc;
6104
6105 rc = bnx2_alloc_mem(bp);
6106 if (rc)
6107 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07006108 bnx2_init_nic(bp);
6109 bnx2_netif_start(bp);
6110 }
Michael Chanb6016b72005-05-26 13:03:09 -07006111 return 0;
6112}
6113
Michael Chan5d5d0012007-12-12 11:17:43 -08006114static int
6115bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6116{
6117 struct bnx2 *bp = netdev_priv(dev);
6118 int rc;
6119
6120 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6121 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6122 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6123
6124 return -EINVAL;
6125 }
6126 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6127 return rc;
6128}
6129
Michael Chanb6016b72005-05-26 13:03:09 -07006130static void
6131bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6132{
Michael Chan972ec0d2006-01-23 16:12:43 -08006133 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006134
6135 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6136 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6137 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6138}
6139
6140static int
6141bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6142{
Michael Chan972ec0d2006-01-23 16:12:43 -08006143 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006144
6145 bp->req_flow_ctrl = 0;
6146 if (epause->rx_pause)
6147 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6148 if (epause->tx_pause)
6149 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6150
6151 if (epause->autoneg) {
6152 bp->autoneg |= AUTONEG_FLOW_CTRL;
6153 }
6154 else {
6155 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6156 }
6157
Michael Chanc770a652005-08-25 15:38:39 -07006158 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006159
Michael Chan0d8a6572007-07-07 22:49:43 -07006160 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07006161
Michael Chanc770a652005-08-25 15:38:39 -07006162 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006163
6164 return 0;
6165}
6166
6167static u32
6168bnx2_get_rx_csum(struct net_device *dev)
6169{
Michael Chan972ec0d2006-01-23 16:12:43 -08006170 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006171
6172 return bp->rx_csum;
6173}
6174
6175static int
6176bnx2_set_rx_csum(struct net_device *dev, u32 data)
6177{
Michael Chan972ec0d2006-01-23 16:12:43 -08006178 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006179
6180 bp->rx_csum = data;
6181 return 0;
6182}
6183
Michael Chanb11d6212006-06-29 12:31:21 -07006184static int
6185bnx2_set_tso(struct net_device *dev, u32 data)
6186{
Michael Chan4666f872007-05-03 13:22:28 -07006187 struct bnx2 *bp = netdev_priv(dev);
6188
6189 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07006190 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006191 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6192 dev->features |= NETIF_F_TSO6;
6193 } else
6194 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6195 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07006196 return 0;
6197}
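
/* Usage note (illustrative): toggled via "ethtool -K ethX tso on|off";
 * TSO6 is only offered on the 5709, matching the NETIF_F_IPV6_CSUM setup
 * in bnx2_init_one() below.
 */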
6198
Michael Chancea94db2006-06-12 22:16:13 -07006199#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07006200
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006201static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006202 char string[ETH_GSTRING_LEN];
6203} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6204 { "rx_bytes" },
6205 { "rx_error_bytes" },
6206 { "tx_bytes" },
6207 { "tx_error_bytes" },
6208 { "rx_ucast_packets" },
6209 { "rx_mcast_packets" },
6210 { "rx_bcast_packets" },
6211 { "tx_ucast_packets" },
6212 { "tx_mcast_packets" },
6213 { "tx_bcast_packets" },
6214 { "tx_mac_errors" },
6215 { "tx_carrier_errors" },
6216 { "rx_crc_errors" },
6217 { "rx_align_errors" },
6218 { "tx_single_collisions" },
6219 { "tx_multi_collisions" },
6220 { "tx_deferred" },
6221 { "tx_excess_collisions" },
6222 { "tx_late_collisions" },
6223 { "tx_total_collisions" },
6224 { "rx_fragments" },
6225 { "rx_jabbers" },
6226 { "rx_undersize_packets" },
6227 { "rx_oversize_packets" },
6228 { "rx_64_byte_packets" },
6229 { "rx_65_to_127_byte_packets" },
6230 { "rx_128_to_255_byte_packets" },
6231 { "rx_256_to_511_byte_packets" },
6232 { "rx_512_to_1023_byte_packets" },
6233 { "rx_1024_to_1522_byte_packets" },
6234 { "rx_1523_to_9022_byte_packets" },
6235 { "tx_64_byte_packets" },
6236 { "tx_65_to_127_byte_packets" },
6237 { "tx_128_to_255_byte_packets" },
6238 { "tx_256_to_511_byte_packets" },
6239 { "tx_512_to_1023_byte_packets" },
6240 { "tx_1024_to_1522_byte_packets" },
6241 { "tx_1523_to_9022_byte_packets" },
6242 { "rx_xon_frames" },
6243 { "rx_xoff_frames" },
6244 { "tx_xon_frames" },
6245 { "tx_xoff_frames" },
6246 { "rx_mac_ctrl_frames" },
6247 { "rx_filtered_packets" },
6248 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07006249 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07006250};
6251
6252#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
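
/* Note (illustrative): the divide by 4 turns a byte offset inside
 * struct statistics_block into an index into the u32 view of the block
 * used by bnx2_get_ethtool_stats() below.
 */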
6253
Arjan van de Venf71e1302006-03-03 21:33:57 -05006254static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006255 STATS_OFFSET32(stat_IfHCInOctets_hi),
6256 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6257 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6258 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6259 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6260 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6261 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6262 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6263 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6264 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6265 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006266 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6267 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6268 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6269 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6270 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6271 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6272 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6273 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6274 STATS_OFFSET32(stat_EtherStatsCollisions),
6275 STATS_OFFSET32(stat_EtherStatsFragments),
6276 STATS_OFFSET32(stat_EtherStatsJabbers),
6277 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6278 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6279 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6280 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6281 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6282 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6283 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6284 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6285 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6286 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6287 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6288 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6289 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6290 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6291 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6292 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6293 STATS_OFFSET32(stat_XonPauseFramesReceived),
6294 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6295 STATS_OFFSET32(stat_OutXonSent),
6296 STATS_OFFSET32(stat_OutXoffSent),
6297 STATS_OFFSET32(stat_MacControlFramesReceived),
6298 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6299 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07006300 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07006301};
6302
6303/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6304 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006305 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006306static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006307 8,0,8,8,8,8,8,8,8,8,
6308 4,0,4,4,4,4,4,4,4,4,
6309 4,4,4,4,4,4,4,4,4,4,
6310 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006311 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07006312};
6313
Michael Chan5b0c76a2005-11-04 08:45:49 -08006314static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6315 8,0,8,8,8,8,8,8,8,8,
6316 4,4,4,4,4,4,4,4,4,4,
6317 4,4,4,4,4,4,4,4,4,4,
6318 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006319 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08006320};
6321
Michael Chanb6016b72005-05-26 13:03:09 -07006322#define BNX2_NUM_TESTS 6
6323
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006324static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006325 char string[ETH_GSTRING_LEN];
6326} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6327 { "register_test (offline)" },
6328 { "memory_test (offline)" },
6329 { "loopback_test (offline)" },
6330 { "nvram_test (online)" },
6331 { "interrupt_test (online)" },
6332 { "link_test (online)" },
6333};
6334
6335static int
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006336bnx2_get_sset_count(struct net_device *dev, int sset)
Michael Chanb6016b72005-05-26 13:03:09 -07006337{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006338 switch (sset) {
6339 case ETH_SS_TEST:
6340 return BNX2_NUM_TESTS;
6341 case ETH_SS_STATS:
6342 return BNX2_NUM_STATS;
6343 default:
6344 return -EOPNOTSUPP;
6345 }
Michael Chanb6016b72005-05-26 13:03:09 -07006346}
6347
6348static void
6349bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6350{
Michael Chan972ec0d2006-01-23 16:12:43 -08006351 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006352
6353 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6354 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006355 int i;
6356
Michael Chanb6016b72005-05-26 13:03:09 -07006357 bnx2_netif_stop(bp);
6358 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6359 bnx2_free_skbs(bp);
6360
6361 if (bnx2_test_registers(bp) != 0) {
6362 buf[0] = 1;
6363 etest->flags |= ETH_TEST_FL_FAILED;
6364 }
6365 if (bnx2_test_memory(bp) != 0) {
6366 buf[1] = 1;
6367 etest->flags |= ETH_TEST_FL_FAILED;
6368 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006369 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006370 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006371
6372 if (!netif_running(bp->dev)) {
6373 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6374 }
6375 else {
6376 bnx2_init_nic(bp);
6377 bnx2_netif_start(bp);
6378 }
6379
6380 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006381 for (i = 0; i < 7; i++) {
6382 if (bp->link_up)
6383 break;
6384 msleep_interruptible(1000);
6385 }
Michael Chanb6016b72005-05-26 13:03:09 -07006386 }
6387
6388 if (bnx2_test_nvram(bp) != 0) {
6389 buf[3] = 1;
6390 etest->flags |= ETH_TEST_FL_FAILED;
6391 }
6392 if (bnx2_test_intr(bp) != 0) {
6393 buf[4] = 1;
6394 etest->flags |= ETH_TEST_FL_FAILED;
6395 }
6396
6397 if (bnx2_test_link(bp) != 0) {
6398 buf[5] = 1;
6399 etest->flags |= ETH_TEST_FL_FAILED;
6400
6401 }
6402}
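
/* Usage note (illustrative): "ethtool -t ethX offline" runs all six tests;
 * without "offline" only the nvram, interrupt and link tests run.  Each
 * nonzero buf[] slot corresponds, in order, to an entry in
 * bnx2_tests_str_arr[] above.  The offline path resets the chip, so the
 * loop above waits up to 7 seconds for the link to return.
 */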
6403
6404static void
6405bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6406{
6407 switch (stringset) {
6408 case ETH_SS_STATS:
6409 memcpy(buf, bnx2_stats_str_arr,
6410 sizeof(bnx2_stats_str_arr));
6411 break;
6412 case ETH_SS_TEST:
6413 memcpy(buf, bnx2_tests_str_arr,
6414 sizeof(bnx2_tests_str_arr));
6415 break;
6416 }
6417}
6418
Michael Chanb6016b72005-05-26 13:03:09 -07006419static void
6420bnx2_get_ethtool_stats(struct net_device *dev,
6421 struct ethtool_stats *stats, u64 *buf)
6422{
Michael Chan972ec0d2006-01-23 16:12:43 -08006423 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006424 int i;
6425 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006426 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006427
6428 if (hw_stats == NULL) {
6429 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6430 return;
6431 }
6432
Michael Chan5b0c76a2005-11-04 08:45:49 -08006433 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6434 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6435 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6436 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006437 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006438 else
6439 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006440
6441 for (i = 0; i < BNX2_NUM_STATS; i++) {
6442 if (stats_len_arr[i] == 0) {
6443 /* skip this counter */
6444 buf[i] = 0;
6445 continue;
6446 }
6447 if (stats_len_arr[i] == 4) {
6448 /* 4-byte counter */
6449 buf[i] = (u64)
6450 *(hw_stats + bnx2_stats_offset_arr[i]);
6451 continue;
6452 }
6453 /* 8-byte counter */
6454 buf[i] = (((u64) *(hw_stats +
6455 bnx2_stats_offset_arr[i])) << 32) +
6456 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6457 }
6458}
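
/* Illustrative sketch, not part of the upstream driver: the statistics
 * block stores 64-bit counters as two consecutive 32-bit words, high word
 * first.  The loop above combines them exactly as this hypothetical helper
 * does; 4-byte counters simply use the first word.
 */
static inline u64 bnx2_stats_read64_sketch(u32 *hw_stats, unsigned long off)
{
	return (((u64) hw_stats[off]) << 32) + hw_stats[off + 1];
}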
6459
6460static int
6461bnx2_phys_id(struct net_device *dev, u32 data)
6462{
Michael Chan972ec0d2006-01-23 16:12:43 -08006463 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006464 int i;
6465 u32 save;
6466
6467 if (data == 0)
6468 data = 2;
6469
6470 save = REG_RD(bp, BNX2_MISC_CFG);
6471 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6472
6473 for (i = 0; i < (data * 2); i++) {
6474 if ((i % 2) == 0) {
6475 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6476 }
6477 else {
6478 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6479 BNX2_EMAC_LED_1000MB_OVERRIDE |
6480 BNX2_EMAC_LED_100MB_OVERRIDE |
6481 BNX2_EMAC_LED_10MB_OVERRIDE |
6482 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6483 BNX2_EMAC_LED_TRAFFIC);
6484 }
6485 msleep_interruptible(500);
6486 if (signal_pending(current))
6487 break;
6488 }
6489 REG_WR(bp, BNX2_EMAC_LED, 0);
6490 REG_WR(bp, BNX2_MISC_CFG, save);
6491 return 0;
6492}
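
/* Usage note (illustrative): this backs "ethtool -p ethX N", blinking the
 * port LED for roughly N seconds (two 500 ms half-cycles per second);
 * N == 0 falls back to 2 seconds.  The original LED mode is restored from
 * "save" when the loop finishes or a signal is pending.
 */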
6493
Michael Chan4666f872007-05-03 13:22:28 -07006494static int
6495bnx2_set_tx_csum(struct net_device *dev, u32 data)
6496{
6497 struct bnx2 *bp = netdev_priv(dev);
6498
6499 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chan6460d942007-07-14 19:07:52 -07006500 return (ethtool_op_set_tx_ipv6_csum(dev, data));
Michael Chan4666f872007-05-03 13:22:28 -07006501 else
6502 return (ethtool_op_set_tx_csum(dev, data));
6503}
6504
Jeff Garzik7282d492006-09-13 14:30:00 -04006505static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006506 .get_settings = bnx2_get_settings,
6507 .set_settings = bnx2_set_settings,
6508 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006509 .get_regs_len = bnx2_get_regs_len,
6510 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006511 .get_wol = bnx2_get_wol,
6512 .set_wol = bnx2_set_wol,
6513 .nway_reset = bnx2_nway_reset,
6514 .get_link = ethtool_op_get_link,
6515 .get_eeprom_len = bnx2_get_eeprom_len,
6516 .get_eeprom = bnx2_get_eeprom,
6517 .set_eeprom = bnx2_set_eeprom,
6518 .get_coalesce = bnx2_get_coalesce,
6519 .set_coalesce = bnx2_set_coalesce,
6520 .get_ringparam = bnx2_get_ringparam,
6521 .set_ringparam = bnx2_set_ringparam,
6522 .get_pauseparam = bnx2_get_pauseparam,
6523 .set_pauseparam = bnx2_set_pauseparam,
6524 .get_rx_csum = bnx2_get_rx_csum,
6525 .set_rx_csum = bnx2_set_rx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006526 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006527 .set_sg = ethtool_op_set_sg,
Michael Chanb11d6212006-06-29 12:31:21 -07006528 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006529 .self_test = bnx2_self_test,
6530 .get_strings = bnx2_get_strings,
6531 .phys_id = bnx2_phys_id,
Michael Chanb6016b72005-05-26 13:03:09 -07006532 .get_ethtool_stats = bnx2_get_ethtool_stats,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006533 .get_sset_count = bnx2_get_sset_count,
Michael Chanb6016b72005-05-26 13:03:09 -07006534};
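
/* Quick reference (illustrative): the ops above map onto the usual ethtool
 * commands -- -i get_drvinfo, -d get_regs, -e/-E eeprom, -c/-C coalescing,
 * -g/-G ring sizes, -a/-A pause, -K offload setters, -t self_test,
 * -p phys_id and -S get_ethtool_stats -- while get_sset_count tells the
 * core how many strings and statistics to expect.
 */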
6535
6536/* Called with rtnl_lock */
6537static int
6538bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6539{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006540 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006541 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006542 int err;
6543
6544 switch(cmd) {
6545 case SIOCGMIIPHY:
6546 data->phy_id = bp->phy_addr;
6547
6548 /* fallthru */
6549 case SIOCGMIIREG: {
6550 u32 mii_regval;
6551
Michael Chan7b6b8342007-07-07 22:50:15 -07006552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6553 return -EOPNOTSUPP;
6554
Michael Chandad3e452007-05-03 13:18:03 -07006555 if (!netif_running(dev))
6556 return -EAGAIN;
6557
Michael Chanc770a652005-08-25 15:38:39 -07006558 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006559 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006560 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006561
6562 data->val_out = mii_regval;
6563
6564 return err;
6565 }
6566
6567 case SIOCSMIIREG:
6568 if (!capable(CAP_NET_ADMIN))
6569 return -EPERM;
6570
Michael Chan7b6b8342007-07-07 22:50:15 -07006571 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6572 return -EOPNOTSUPP;
6573
Michael Chandad3e452007-05-03 13:18:03 -07006574 if (!netif_running(dev))
6575 return -EAGAIN;
6576
Michael Chanc770a652005-08-25 15:38:39 -07006577 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006578 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006579 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006580
6581 return err;
6582
6583 default:
6584 /* do nothing */
6585 break;
6586 }
6587 return -EOPNOTSUPP;
6588}
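
/* Usage note (illustrative): SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are the
 * MII ioctls used by tools such as mii-tool.  Reads and writes go through
 * bnx2_read_phy()/bnx2_write_phy() under phy_lock, are limited to 5-bit
 * register numbers, and are refused with -EOPNOTSUPP when the PHY is
 * managed remotely (REMOTE_PHY_CAP_FLAG) or -EAGAIN when the interface is
 * down.
 */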
6589
6590/* Called with rtnl_lock */
6591static int
6592bnx2_change_mac_addr(struct net_device *dev, void *p)
6593{
6594 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006595 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006596
Michael Chan73eef4c2005-08-25 15:39:15 -07006597 if (!is_valid_ether_addr(addr->sa_data))
6598 return -EINVAL;
6599
Michael Chanb6016b72005-05-26 13:03:09 -07006600 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6601 if (netif_running(dev))
6602 bnx2_set_mac_addr(bp);
6603
6604 return 0;
6605}
6606
6607/* Called with rtnl_lock */
6608static int
6609bnx2_change_mtu(struct net_device *dev, int new_mtu)
6610{
Michael Chan972ec0d2006-01-23 16:12:43 -08006611 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006612
6613 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6614 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6615 return -EINVAL;
6616
6617 dev->mtu = new_mtu;
Michael Chan5d5d0012007-12-12 11:17:43 -08006618 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
Michael Chanb6016b72005-05-26 13:03:09 -07006619}
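
/* Note (illustrative): the new MTU plus ETH_HLEN must fit between the
 * driver's minimum and jumbo frame limits; a valid change is applied by
 * rebuilding the rings at their current sizes via bnx2_change_ring_size(),
 * which tears down and re-initializes the NIC when it is running.
 */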
6620
6621#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6622static void
6623poll_bnx2(struct net_device *dev)
6624{
Michael Chan972ec0d2006-01-23 16:12:43 -08006625 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006626
6627 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006628 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006629 enable_irq(bp->pdev->irq);
6630}
6631#endif
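
/* Note (illustrative): poll_bnx2() is the netpoll hook used by netconsole
 * and similar facilities; it temporarily disables the IRQ and calls the
 * interrupt handler directly so packets can be processed without relying
 * on interrupt delivery.
 */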
6632
Michael Chan253c8b72007-01-08 19:56:01 -08006633static void __devinit
6634bnx2_get_5709_media(struct bnx2 *bp)
6635{
6636 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6637 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6638 u32 strap;
6639
6640 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6641 return;
6642 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6643 bp->phy_flags |= PHY_SERDES_FLAG;
6644 return;
6645 }
6646
6647 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6648 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6649 else
6650 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6651
6652 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6653 switch (strap) {
6654 case 0x4:
6655 case 0x5:
6656 case 0x6:
6657 bp->phy_flags |= PHY_SERDES_FLAG;
6658 return;
6659 }
6660 } else {
6661 switch (strap) {
6662 case 0x1:
6663 case 0x2:
6664 case 0x4:
6665 bp->phy_flags |= PHY_SERDES_FLAG;
6666 return;
6667 }
6668 }
6669}
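
/* Note (illustrative): on the 5709 the media type comes from the dual-media
 * bond ID ("C" leaves the default copper setup, "S" forces SerDes);
 * otherwise the PHY strap bits are read -- from the override field when
 * STRAP_OVERRIDE is set, else from the hardware straps -- and a
 * per-PCI-function set of strap values selects SerDes.
 */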
6670
Michael Chan883e5152007-05-03 13:25:11 -07006671static void __devinit
6672bnx2_get_pci_speed(struct bnx2 *bp)
6673{
6674 u32 reg;
6675
6676 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6677 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6678 u32 clkreg;
6679
6680 bp->flags |= PCIX_FLAG;
6681
6682 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6683
6684 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6685 switch (clkreg) {
6686 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6687 bp->bus_speed_mhz = 133;
6688 break;
6689
6690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6691 bp->bus_speed_mhz = 100;
6692 break;
6693
6694 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6695 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6696 bp->bus_speed_mhz = 66;
6697 break;
6698
6699 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6700 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6701 bp->bus_speed_mhz = 50;
6702 break;
6703
6704 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6705 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6706 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6707 bp->bus_speed_mhz = 33;
6708 break;
6709 }
6710 }
6711 else {
6712 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6713 bp->bus_speed_mhz = 66;
6714 else
6715 bp->bus_speed_mhz = 33;
6716 }
6717
6718 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6719 bp->flags |= PCI_32BIT_FLAG;
6720
6721}
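
/* Note (illustrative): PCIX_DET distinguishes PCI-X from conventional PCI.
 * In PCI-X mode the clock-detect field maps to 133/100/66/50/33 MHz, plain
 * PCI reports 66 or 33 MHz from M66EN, and 32BIT_DET marks a 32-bit bus;
 * bnx2_bus_string() below turns these flags into the probe banner text.
 */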
6722
Michael Chanb6016b72005-05-26 13:03:09 -07006723static int __devinit
6724bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6725{
6726 struct bnx2 *bp;
6727 unsigned long mem_len;
Michael Chan58fc2ea2007-07-07 22:52:02 -07006728 int rc, i, j;
Michael Chanb6016b72005-05-26 13:03:09 -07006729 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006730 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006731
Michael Chanb6016b72005-05-26 13:03:09 -07006732 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006733 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006734
6735 bp->flags = 0;
6736 bp->phy_flags = 0;
6737
6738 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6739 rc = pci_enable_device(pdev);
6740 if (rc) {
Joe Perches898eb712007-10-18 03:06:30 -07006741 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006742 goto err_out;
6743 }
6744
6745 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006746 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006747 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006748 rc = -ENODEV;
6749 goto err_out_disable;
6750 }
6751
6752 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6753 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006754 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006755 goto err_out_disable;
6756 }
6757
6758 pci_set_master(pdev);
6759
6760 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6761 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006762 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006763 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006764 rc = -EIO;
6765 goto err_out_release;
6766 }
6767
Michael Chanb6016b72005-05-26 13:03:09 -07006768 bp->dev = dev;
6769 bp->pdev = pdev;
6770
6771 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006772 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006773 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006774
6775 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006776 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006777 dev->mem_end = dev->mem_start + mem_len;
6778 dev->irq = pdev->irq;
6779
6780 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6781
6782 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006783 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006784 rc = -ENOMEM;
6785 goto err_out_release;
6786 }
6787
6788 /* Configure byte swap and enable write to the reg_window registers.
6789 * Rely on the CPU to do target byte swapping on big-endian systems;
6790 * the chip's target access swapping will not swap all accesses.
6791 */
6792 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6793 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6794 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6795
Pavel Machek829ca9a2005-09-03 15:56:56 -07006796 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006797
6798 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6799
Michael Chan883e5152007-05-03 13:25:11 -07006800 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6801 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6802 dev_err(&pdev->dev,
6803 "Cannot find PCIE capability, aborting.\n");
6804 rc = -EIO;
6805 goto err_out_unmap;
6806 }
6807 bp->flags |= PCIE_FLAG;
6808 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006809 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6810 if (bp->pcix_cap == 0) {
6811 dev_err(&pdev->dev,
6812 "Cannot find PCIX capability, aborting.\n");
6813 rc = -EIO;
6814 goto err_out_unmap;
6815 }
6816 }
6817
Michael Chan8e6a72c2007-05-03 13:24:48 -07006818 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6819 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6820 bp->flags |= MSI_CAP_FLAG;
6821 }
6822
Michael Chan40453c82007-05-03 13:19:18 -07006823 /* 5708 cannot support DMA addresses > 40-bit. */
6824 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6825 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6826 else
6827 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6828
6829 /* Configure DMA attributes. */
6830 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6831 dev->features |= NETIF_F_HIGHDMA;
6832 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6833 if (rc) {
6834 dev_err(&pdev->dev,
6835 "pci_set_consistent_dma_mask failed, aborting.\n");
6836 goto err_out_unmap;
6837 }
6838 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6839 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6840 goto err_out_unmap;
6841 }
6842
Michael Chan883e5152007-05-03 13:25:11 -07006843 if (!(bp->flags & PCIE_FLAG))
6844 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006845
6846 /* 5706A0 may falsely detect SERR and PERR. */
6847 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6848 reg = REG_RD(bp, PCI_COMMAND);
6849 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6850 REG_WR(bp, PCI_COMMAND, reg);
6851 }
6852 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6853 !(bp->flags & PCIX_FLAG)) {
6854
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006855 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006856 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006857 goto err_out_unmap;
6858 }
6859
6860 bnx2_init_nvram(bp);
6861
Michael Chane3648b32005-11-04 08:51:21 -08006862 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6863
6864 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006865 BNX2_SHM_HDR_SIGNATURE_SIG) {
6866 u32 off = PCI_FUNC(pdev->devfn) << 2;
6867
6868 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6869 } else
Michael Chane3648b32005-11-04 08:51:21 -08006870 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6871
Michael Chanb6016b72005-05-26 13:03:09 -07006872 /* Get the permanent MAC address. First we need to make sure the
6873 * firmware is actually running.
6874 */
Michael Chane3648b32005-11-04 08:51:21 -08006875 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006876
6877 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6878 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006879 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006880 rc = -ENODEV;
6881 goto err_out_unmap;
6882 }
6883
Michael Chan58fc2ea2007-07-07 22:52:02 -07006884 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6885 for (i = 0, j = 0; i < 3; i++) {
6886 u8 num, k, skip0;
6887
6888 num = (u8) (reg >> (24 - (i * 8)));
6889 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6890 if (num >= k || !skip0 || k == 1) {
6891 bp->fw_version[j++] = (num / k) + '0';
6892 skip0 = 0;
6893 }
6894 }
6895 if (i != 2)
6896 bp->fw_version[j++] = '.';
6897 }
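	/* Note (illustrative): at this point bp->fw_version holds the
	 * bootcode version in dotted form built from the top three bytes of
	 * BNX2_DEV_INFO_BC_REV; e.g. a hypothetical value of 0x03040200
	 * would have produced "3.4.2" (leading zeros suppressed).
	 */
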
Michael Chan846f5c62007-10-10 16:16:51 -07006898 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6899 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6900 bp->wol = 1;
6901
6902 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
Michael Chanc2d3db82007-07-16 18:26:43 -07006903 bp->flags |= ASF_ENABLE_FLAG;
6904
6905 for (i = 0; i < 30; i++) {
6906 reg = REG_RD_IND(bp, bp->shmem_base +
6907 BNX2_BC_STATE_CONDITION);
6908 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6909 break;
6910 msleep(10);
6911 }
6912 }
Michael Chan58fc2ea2007-07-07 22:52:02 -07006913 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6914 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6915 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6916 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6917 int i;
6918 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6919
6920 bp->fw_version[j++] = ' ';
6921 for (i = 0; i < 3; i++) {
6922 reg = REG_RD_IND(bp, addr + i * 4);
6923 reg = swab32(reg);
6924 memcpy(&bp->fw_version[j], &reg, 4);
6925 j += 4;
6926 }
6927 }
Michael Chanb6016b72005-05-26 13:03:09 -07006928
Michael Chane3648b32005-11-04 08:51:21 -08006929 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006930 bp->mac_addr[0] = (u8) (reg >> 8);
6931 bp->mac_addr[1] = (u8) reg;
6932
Michael Chane3648b32005-11-04 08:51:21 -08006933 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006934 bp->mac_addr[2] = (u8) (reg >> 24);
6935 bp->mac_addr[3] = (u8) (reg >> 16);
6936 bp->mac_addr[4] = (u8) (reg >> 8);
6937 bp->mac_addr[5] = (u8) reg;
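	/* Note (illustrative): the permanent MAC address is split across the
	 * two shared-memory words above, most significant byte first:
	 * hypothetical values MAC_UPPER = 0x0000a1b2 and MAC_LOWER =
	 * 0xc3d4e5f6 would yield a1:b2:c3:d4:e5:f6.
	 */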
6938
Michael Chan5d5d0012007-12-12 11:17:43 -08006939 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6940
Michael Chanb6016b72005-05-26 13:03:09 -07006941 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006942 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006943
6944 bp->rx_csum = 1;
6945
Michael Chanb6016b72005-05-26 13:03:09 -07006946 bp->tx_quick_cons_trip_int = 20;
6947 bp->tx_quick_cons_trip = 20;
6948 bp->tx_ticks_int = 80;
6949 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006950
Michael Chanb6016b72005-05-26 13:03:09 -07006951 bp->rx_quick_cons_trip_int = 6;
6952 bp->rx_quick_cons_trip = 6;
6953 bp->rx_ticks_int = 18;
6954 bp->rx_ticks = 18;
6955
Michael Chan7ea69202007-07-16 18:27:10 -07006956 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006957
6958 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006959 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006960
Michael Chan5b0c76a2005-11-04 08:45:49 -08006961 bp->phy_addr = 1;
6962
Michael Chanb6016b72005-05-26 13:03:09 -07006963 /* Determine the PHY/media type; WoL is disabled further below for SERDES chips without GIG_LINK_ON_VAUX support. */
Michael Chan253c8b72007-01-08 19:56:01 -08006964 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6965 bnx2_get_5709_media(bp);
6966 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006967 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006968
Michael Chan0d8a6572007-07-07 22:49:43 -07006969 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006970 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006971 bp->phy_port = PORT_FIBRE;
Michael Chan846f5c62007-10-10 16:16:51 -07006972 reg = REG_RD_IND(bp, bp->shmem_base +
6973 BNX2_SHARED_HW_CFG_CONFIG);
6974 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6975 bp->flags |= NO_WOL_FLAG;
6976 bp->wol = 0;
6977 }
Michael Chanbac0dff2006-11-19 14:15:05 -08006978 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006979 bp->phy_addr = 2;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006980 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6981 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6982 }
Michael Chan0d8a6572007-07-07 22:49:43 -07006983 bnx2_init_remote_phy(bp);
6984
Michael Chan261dd5c2007-01-08 19:55:46 -08006985 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6986 CHIP_NUM(bp) == CHIP_NUM_5708)
6987 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanfb0c18b2007-12-10 17:18:23 -08006988 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6989 (CHIP_REV(bp) == CHIP_REV_Ax ||
6990 CHIP_REV(bp) == CHIP_REV_Bx))
Michael Chanb659f442007-02-02 00:46:35 -08006991 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006992
Michael Chan16088272006-06-12 22:16:43 -07006993 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6994 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
Michael Chan846f5c62007-10-10 16:16:51 -07006995 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chandda1e392006-01-23 16:08:14 -08006996 bp->flags |= NO_WOL_FLAG;
Michael Chan846f5c62007-10-10 16:16:51 -07006997 bp->wol = 0;
6998 }
Michael Chandda1e392006-01-23 16:08:14 -08006999
Michael Chanb6016b72005-05-26 13:03:09 -07007000 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7001 bp->tx_quick_cons_trip_int =
7002 bp->tx_quick_cons_trip;
7003 bp->tx_ticks_int = bp->tx_ticks;
7004 bp->rx_quick_cons_trip_int =
7005 bp->rx_quick_cons_trip;
7006 bp->rx_ticks_int = bp->rx_ticks;
7007 bp->comp_prod_trip_int = bp->comp_prod_trip;
7008 bp->com_ticks_int = bp->com_ticks;
7009 bp->cmd_ticks_int = bp->cmd_ticks;
7010 }
7011
Michael Chanf9317a42006-09-29 17:06:23 -07007012 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7013 *
7014 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7015 * with byte enables disabled on the unused 32-bit word. This is legal
7016 * but causes problems on the AMD 8132 which will eventually stop
7017 * responding after a while.
7018 *
7019 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11007020 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07007021 */
7022 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7023 struct pci_dev *amd_8132 = NULL;
7024
7025 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7026 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7027 amd_8132))) {
Michael Chanf9317a42006-09-29 17:06:23 -07007028
Auke Kok44c10132007-06-08 15:46:36 -07007029 if (amd_8132->revision >= 0x10 &&
7030 amd_8132->revision <= 0x13) {
Michael Chanf9317a42006-09-29 17:06:23 -07007031 disable_msi = 1;
7032 pci_dev_put(amd_8132);
7033 break;
7034 }
7035 }
7036 }
7037
Michael Chandeaf3912007-07-07 22:48:00 -07007038 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07007039 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7040
Michael Chancd339a02005-08-25 15:35:24 -07007041 init_timer(&bp->timer);
7042 bp->timer.expires = RUN_AT(bp->timer_interval);
7043 bp->timer.data = (unsigned long) bp;
7044 bp->timer.function = bnx2_timer;
7045
Michael Chanb6016b72005-05-26 13:03:09 -07007046 return 0;
7047
7048err_out_unmap:
7049 if (bp->regview) {
7050 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07007051 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07007052 }
7053
7054err_out_release:
7055 pci_release_regions(pdev);
7056
7057err_out_disable:
7058 pci_disable_device(pdev);
7059 pci_set_drvdata(pdev, NULL);
7060
7061err_out:
7062 return rc;
7063}
7064
Michael Chan883e5152007-05-03 13:25:11 -07007065static char * __devinit
7066bnx2_bus_string(struct bnx2 *bp, char *str)
7067{
7068 char *s = str;
7069
7070 if (bp->flags & PCIE_FLAG) {
7071 s += sprintf(s, "PCI Express");
7072 } else {
7073 s += sprintf(s, "PCI");
7074 if (bp->flags & PCIX_FLAG)
7075 s += sprintf(s, "-X");
7076 if (bp->flags & PCI_32BIT_FLAG)
7077 s += sprintf(s, " 32-bit");
7078 else
7079 s += sprintf(s, " 64-bit");
7080 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7081 }
7082 return str;
7083}
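
/* Note (illustrative): the probe banner in bnx2_init_one() prints this
 * string, e.g. "PCI Express" for 5709 parts or "PCI-X 64-bit 133MHz" for a
 * 5706/5708 in a full-speed PCI-X slot, based on the flags set in
 * bnx2_get_pci_speed().
 */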
7084
Michael Chanb6016b72005-05-26 13:03:09 -07007085static int __devinit
7086bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7087{
7088 static int version_printed = 0;
7089 struct net_device *dev = NULL;
7090 struct bnx2 *bp;
Joe Perches0795af52007-10-03 17:59:30 -07007091 int rc;
Michael Chan883e5152007-05-03 13:25:11 -07007092 char str[40];
Joe Perches0795af52007-10-03 17:59:30 -07007093 DECLARE_MAC_BUF(mac);
Michael Chanb6016b72005-05-26 13:03:09 -07007094
7095 if (version_printed++ == 0)
7096 printk(KERN_INFO "%s", version);
7097
7098 /* dev zeroed in init_etherdev */
7099 dev = alloc_etherdev(sizeof(*bp));
7100
7101 if (!dev)
7102 return -ENOMEM;
7103
7104 rc = bnx2_init_board(pdev, dev);
7105 if (rc < 0) {
7106 free_netdev(dev);
7107 return rc;
7108 }
7109
7110 dev->open = bnx2_open;
7111 dev->hard_start_xmit = bnx2_start_xmit;
7112 dev->stop = bnx2_close;
7113 dev->get_stats = bnx2_get_stats;
7114 dev->set_multicast_list = bnx2_set_rx_mode;
7115 dev->do_ioctl = bnx2_ioctl;
7116 dev->set_mac_address = bnx2_change_mac_addr;
7117 dev->change_mtu = bnx2_change_mtu;
7118 dev->tx_timeout = bnx2_tx_timeout;
7119 dev->watchdog_timeo = TX_TIMEOUT;
7120#ifdef BCM_VLAN
7121 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07007122#endif
Michael Chanb6016b72005-05-26 13:03:09 -07007123 dev->ethtool_ops = &bnx2_ethtool_ops;
Michael Chanb6016b72005-05-26 13:03:09 -07007124
Michael Chan972ec0d2006-01-23 16:12:43 -08007125 bp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007126 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
Michael Chanb6016b72005-05-26 13:03:09 -07007127
7128#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7129 dev->poll_controller = poll_bnx2;
7130#endif
7131
Michael Chan1b2f9222007-05-03 13:20:19 -07007132 pci_set_drvdata(pdev, dev);
7133
7134 memcpy(dev->dev_addr, bp->mac_addr, 6);
7135 memcpy(dev->perm_addr, bp->mac_addr, 6);
7136 bp->name = board_info[ent->driver_data].name;
7137
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007138 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07007139 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007140 dev->features |= NETIF_F_IPV6_CSUM;
7141
Michael Chan1b2f9222007-05-03 13:20:19 -07007142#ifdef BCM_VLAN
7143 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7144#endif
7145 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07007146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7147 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07007148
Michael Chanb6016b72005-05-26 13:03:09 -07007149 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04007150 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07007151 if (bp->regview)
7152 iounmap(bp->regview);
7153 pci_release_regions(pdev);
7154 pci_disable_device(pdev);
7155 pci_set_drvdata(pdev, NULL);
7156 free_netdev(dev);
7157 return rc;
7158 }
7159
Michael Chan883e5152007-05-03 13:25:11 -07007160 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Joe Perches0795af52007-10-03 17:59:30 -07007161 "IRQ %d, node addr %s\n",
Michael Chanb6016b72005-05-26 13:03:09 -07007162 dev->name,
7163 bp->name,
7164 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7165 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07007166 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07007167 dev->base_addr,
Joe Perches0795af52007-10-03 17:59:30 -07007168 bp->pdev->irq, print_mac(mac, dev->dev_addr));
Michael Chanb6016b72005-05-26 13:03:09 -07007169
Michael Chanb6016b72005-05-26 13:03:09 -07007170 return 0;
7171}
7172
7173static void __devexit
7174bnx2_remove_one(struct pci_dev *pdev)
7175{
7176 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007177 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007178
Michael Chanafdc08b2005-08-25 15:34:29 -07007179 flush_scheduled_work();
7180
Michael Chanb6016b72005-05-26 13:03:09 -07007181 unregister_netdev(dev);
7182
7183 if (bp->regview)
7184 iounmap(bp->regview);
7185
7186 free_netdev(dev);
7187 pci_release_regions(pdev);
7188 pci_disable_device(pdev);
7189 pci_set_drvdata(pdev, NULL);
7190}
7191
7192static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07007193bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07007194{
7195 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007196 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007197 u32 reset_code;
7198
Michael Chan6caebb02007-08-03 20:57:25 -07007199 /* PCI register 4 needs to be saved whether netif_running() or not.
7200 * MSI address and data need to be saved if using MSI and
7201 * netif_running().
7202 */
7203 pci_save_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007204 if (!netif_running(dev))
7205 return 0;
7206
Michael Chan1d60290f2006-03-20 17:50:08 -08007207 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07007208 bnx2_netif_stop(bp);
7209 netif_device_detach(dev);
7210 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08007211 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07007212 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08007213 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07007214 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7215 else
7216 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7217 bnx2_reset_chip(bp, reset_code);
7218 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07007219 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07007220 return 0;
7221}
7222
7223static int
7224bnx2_resume(struct pci_dev *pdev)
7225{
7226 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007227 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007228
Michael Chan6caebb02007-08-03 20:57:25 -07007229 pci_restore_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007230 if (!netif_running(dev))
7231 return 0;
7232
Pavel Machek829ca9a2005-09-03 15:56:56 -07007233 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07007234 netif_device_attach(dev);
7235 bnx2_init_nic(bp);
7236 bnx2_netif_start(bp);
7237 return 0;
7238}
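
/* Note (illustrative): on suspend the chip is reset with a code reflecting
 * the WoL configuration (UNLOAD_LNK_DN when WoL is unavailable, SUSPEND_WOL
 * when enabled, SUSPEND_NO_WOL otherwise) before dropping to the chosen PCI
 * power state; resume restores config space, returns to D0 and re-runs
 * bnx2_init_nic().
 */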
7239
7240static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07007241 .name = DRV_MODULE_NAME,
7242 .id_table = bnx2_pci_tbl,
7243 .probe = bnx2_init_one,
7244 .remove = __devexit_p(bnx2_remove_one),
7245 .suspend = bnx2_suspend,
7246 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07007247};
7248
7249static int __init bnx2_init(void)
7250{
Jeff Garzik29917622006-08-19 17:48:59 -04007251 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07007252}
7253
7254static void __exit bnx2_cleanup(void)
7255{
7256 pci_unregister_driver(&bnx2_pci_driver);
7257}
7258
7259module_init(bnx2_init);
7260module_exit(bnx2_cleanup);
7261
7262
7263