/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.7.0"
#define DRV_MODULE_RELDATE	"December 11, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

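/* Return the number of free TX descriptors, derived from the 16-bit
 * producer index in bp and the consumer index tracked by the NAPI context.
 */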
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bnapi->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

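/* Indirect register accesses go through the PCICFG register window and are
 * serialized with indirect_lock so the address/data pair stays consistent.
 */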
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

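/* Write one word of context memory.  The 5709 posts the write through
 * CTX_CTX_DATA/CTX_CTX_CTRL and briefly polls for the WRITE_REQ bit to
 * clear; older chips write directly through CTX_DATA_ADR/CTX_DATA.
 */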
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

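/* MDIO access to the PHY through the EMAC.  Hardware auto-polling is paused
 * around the manual transaction, and the MDIO_COMM register is polled
 * (up to 50 x 10 usec) for the START_BUSY bit to clear.
 */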
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

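/* Re-arm the interrupt: acknowledge up to the last status block index seen
 * by NAPI, then force a coalescing event (COAL_NOW) so any pending work
 * generates a new interrupt.
 */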
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	napi_disable(&bp->bnx2_napi.napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	napi_enable(&bp->bnx2_napi.napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		if (bp->rx_pg_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_pg_desc_ring[i],
					    bp->rx_pg_desc_mapping[i]);
		bp->rx_pg_desc_ring[i] = NULL;
	}
	if (bp->rx_pg_ring)
		vfree(bp->rx_pg_ring);
	bp->rx_pg_ring = NULL;
}

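/* Allocate the TX/RX descriptor rings and their software shadow rings, the
 * combined status + statistics block (a single DMA allocation), and, on the
 * 5709, the context memory pages.
 */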
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->bnx2_napi.status_blk = bp->status_blk;

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

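/* Resolve the RX/TX pause configuration.  If flow control was not fully
 * autonegotiated, the requested setting is used for full duplex links.  The
 * 5708 SerDes reports the resolved result directly; otherwise it is derived
 * from the local and remote pause advertisements per 802.3 Annex 28B.
 */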
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

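/* Enable or disable 2.5G advertisement on SerDes PHYs through the UP1
 * register; the 5709 needs its MDIO block address switched to the OVER1G
 * bank around the access.
 */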
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

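/* With a firmware-managed (remote) PHY, link settings are not written to the
 * PHY directly; they are encoded into a shared-memory argument word and
 * passed to the bootcode with a SET_LINK command.
 */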
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

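/* Write the next driver pulse sequence number into shared memory so the
 * bootcode knows the driver is still alive.
 */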
Michael Chan0d8a6572007-07-07 22:49:43 -07001539static void
Michael Chandf149d72007-07-07 22:51:36 -07001540bnx2_send_heart_beat(struct bnx2 *bp)
1541{
1542 u32 msg;
1543 u32 addr;
1544
1545 spin_lock(&bp->indirect_lock);
1546 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550 spin_unlock(&bp->indirect_lock);
1551}
1552
1553static void
Michael Chan0d8a6572007-07-07 22:49:43 -07001554bnx2_remote_phy_event(struct bnx2 *bp)
1555{
1556 u32 msg;
1557 u8 link_up = bp->link_up;
1558 u8 old_port;
1559
1560 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1561
Michael Chandf149d72007-07-07 22:51:36 -07001562 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1563 bnx2_send_heart_beat(bp);
1564
1565 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1566
Michael Chan0d8a6572007-07-07 22:49:43 -07001567 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1568 bp->link_up = 0;
1569 else {
1570 u32 speed;
1571
1572 bp->link_up = 1;
1573 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1574 bp->duplex = DUPLEX_FULL;
1575 switch (speed) {
1576 case BNX2_LINK_STATUS_10HALF:
1577 bp->duplex = DUPLEX_HALF;
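			/* fall through */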
1578 case BNX2_LINK_STATUS_10FULL:
1579 bp->line_speed = SPEED_10;
1580 break;
1581 case BNX2_LINK_STATUS_100HALF:
1582 bp->duplex = DUPLEX_HALF;
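			/* fall through */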
1583 case BNX2_LINK_STATUS_100BASE_T4:
1584 case BNX2_LINK_STATUS_100FULL:
1585 bp->line_speed = SPEED_100;
1586 break;
1587 case BNX2_LINK_STATUS_1000HALF:
1588 bp->duplex = DUPLEX_HALF;
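			/* fall through */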
1589 case BNX2_LINK_STATUS_1000FULL:
1590 bp->line_speed = SPEED_1000;
1591 break;
1592 case BNX2_LINK_STATUS_2500HALF:
1593 bp->duplex = DUPLEX_HALF;
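			/* fall through */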
1594 case BNX2_LINK_STATUS_2500FULL:
1595 bp->line_speed = SPEED_2500;
1596 break;
1597 default:
1598 bp->line_speed = 0;
1599 break;
1600 }
1601
1602 spin_lock(&bp->phy_lock);
1603 bp->flow_ctrl = 0;
1604 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1605 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1606 if (bp->duplex == DUPLEX_FULL)
1607 bp->flow_ctrl = bp->req_flow_ctrl;
1608 } else {
1609 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1610 bp->flow_ctrl |= FLOW_CTRL_TX;
1611 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1612 bp->flow_ctrl |= FLOW_CTRL_RX;
1613 }
1614
1615 old_port = bp->phy_port;
1616 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1617 bp->phy_port = PORT_FIBRE;
1618 else
1619 bp->phy_port = PORT_TP;
1620
1621 if (old_port != bp->phy_port)
1622 bnx2_set_default_link(bp);
1623
1624 spin_unlock(&bp->phy_lock);
1625 }
1626 if (bp->link_up != link_up)
1627 bnx2_report_link(bp);
1628
1629 bnx2_set_mac_link(bp);
1630}
1631
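/* Dispatch a firmware event code: link events are handled by
 * bnx2_remote_phy_event(); timer expiration and unknown codes just
 * answer the firmware heartbeat.
 */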
1632static int
1633bnx2_set_remote_link(struct bnx2 *bp)
1634{
1635 u32 evt_code;
1636
1637 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638 switch (evt_code) {
1639 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640 bnx2_remote_phy_event(bp);
1641 break;
1642 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643 default:
Michael Chandf149d72007-07-07 22:51:36 -07001644 bnx2_send_heart_beat(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07001645 break;
1646 }
1647 return 0;
1648}
1649
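/* Configure the copper PHY for autonegotiation (rewriting the advertisement
 * registers only when they change) or for a forced speed/duplex, then
 * resolve flow control.  Called with phy_lock held; the lock is dropped
 * briefly around the forced link-down delay.
 */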
Michael Chanb6016b72005-05-26 13:03:09 -07001650static int
1651bnx2_setup_copper_phy(struct bnx2 *bp)
1652{
1653 u32 bmcr;
1654 u32 new_bmcr;
1655
Michael Chanca58c3a2007-05-03 13:22:52 -07001656 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001657
1658 if (bp->autoneg & AUTONEG_SPEED) {
1659 u32 adv_reg, adv1000_reg;
1660 u32 new_adv_reg = 0;
1661 u32 new_adv1000_reg = 0;
1662
Michael Chanca58c3a2007-05-03 13:22:52 -07001663 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001664 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1665 ADVERTISE_PAUSE_ASYM);
1666
1667 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1668 adv1000_reg &= PHY_ALL_1000_SPEED;
1669
1670 if (bp->advertising & ADVERTISED_10baseT_Half)
1671 new_adv_reg |= ADVERTISE_10HALF;
1672 if (bp->advertising & ADVERTISED_10baseT_Full)
1673 new_adv_reg |= ADVERTISE_10FULL;
1674 if (bp->advertising & ADVERTISED_100baseT_Half)
1675 new_adv_reg |= ADVERTISE_100HALF;
1676 if (bp->advertising & ADVERTISED_100baseT_Full)
1677 new_adv_reg |= ADVERTISE_100FULL;
1678 if (bp->advertising & ADVERTISED_1000baseT_Full)
1679 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001680
Michael Chanb6016b72005-05-26 13:03:09 -07001681 new_adv_reg |= ADVERTISE_CSMA;
1682
1683 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1684
1685 if ((adv1000_reg != new_adv1000_reg) ||
1686 (adv_reg != new_adv_reg) ||
1687 ((bmcr & BMCR_ANENABLE) == 0)) {
1688
Michael Chanca58c3a2007-05-03 13:22:52 -07001689 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001690 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001691 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001692 BMCR_ANENABLE);
1693 }
1694 else if (bp->link_up) {
 1695 /* Flow ctrl may have changed from auto to forced
 1696 * or vice-versa. */
1697
1698 bnx2_resolve_flow_ctrl(bp);
1699 bnx2_set_mac_link(bp);
1700 }
1701 return 0;
1702 }
1703
1704 new_bmcr = 0;
1705 if (bp->req_line_speed == SPEED_100) {
1706 new_bmcr |= BMCR_SPEED100;
1707 }
1708 if (bp->req_duplex == DUPLEX_FULL) {
1709 new_bmcr |= BMCR_FULLDPLX;
1710 }
1711 if (new_bmcr != bmcr) {
1712 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001713
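			/* BMSR link status is latched, so read it twice to
			 * get the current state.
			 */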
Michael Chanca58c3a2007-05-03 13:22:52 -07001714 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1715 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001716
Michael Chanb6016b72005-05-26 13:03:09 -07001717 if (bmsr & BMSR_LSTATUS) {
1718 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001719 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001720 spin_unlock_bh(&bp->phy_lock);
1721 msleep(50);
1722 spin_lock_bh(&bp->phy_lock);
1723
Michael Chanca58c3a2007-05-03 13:22:52 -07001724 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1725 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001726 }
1727
Michael Chanca58c3a2007-05-03 13:22:52 -07001728 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001729
 1730 /* Normally, the new speed is set up after the link has
1731 * gone down and up again. In some cases, link will not go
1732 * down so we need to set up the new speed here.
1733 */
1734 if (bmsr & BMSR_LSTATUS) {
1735 bp->line_speed = bp->req_line_speed;
1736 bp->duplex = bp->req_duplex;
1737 bnx2_resolve_flow_ctrl(bp);
1738 bnx2_set_mac_link(bp);
1739 }
Michael Chan27a005b2007-05-03 13:23:41 -07001740 } else {
1741 bnx2_resolve_flow_ctrl(bp);
1742 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001743 }
1744 return 0;
1745}
1746
1747static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001748bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001749{
1750 if (bp->loopback == MAC_LOOPBACK)
1751 return 0;
1752
1753 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001754 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001755 }
1756 else {
1757 return (bnx2_setup_copper_phy(bp));
1758 }
1759}
1760
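/* Initialize the 5709 SerDes PHY.  The IEEE registers sit at an offset of
 * 0x10 in the AN MMD block, so the mii_* register pointers are adjusted
 * before the SerDes digital, next-page and CL73 blocks are programmed.
 */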
1761static int
Michael Chan27a005b2007-05-03 13:23:41 -07001762bnx2_init_5709s_phy(struct bnx2 *bp)
1763{
1764 u32 val;
1765
1766 bp->mii_bmcr = MII_BMCR + 0x10;
1767 bp->mii_bmsr = MII_BMSR + 0x10;
1768 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1769 bp->mii_adv = MII_ADVERTISE + 0x10;
1770 bp->mii_lpa = MII_LPA + 0x10;
1771 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1772
1773 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1774 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1775
1776 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1777 bnx2_reset_phy(bp);
1778
1779 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1780
1781 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1782 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1783 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1784 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1785
1786 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1787 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1788 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1789 val |= BCM5708S_UP1_2G5;
1790 else
1791 val &= ~BCM5708S_UP1_2G5;
1792 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1793
1794 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1795 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1796 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1797 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1798
1799 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1800
1801 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1802 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1803 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1804
1805 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1806
1807 return 0;
1808}
1809
1810static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001811bnx2_init_5708s_phy(struct bnx2 *bp)
1812{
1813 u32 val;
1814
Michael Chan27a005b2007-05-03 13:23:41 -07001815 bnx2_reset_phy(bp);
1816
1817 bp->mii_up1 = BCM5708S_UP1;
1818
Michael Chan5b0c76a2005-11-04 08:45:49 -08001819 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1820 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1822
1823 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1824 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1825 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1826
1827 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1828 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1829 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1830
1831 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1832 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1833 val |= BCM5708S_UP1_2G5;
1834 bnx2_write_phy(bp, BCM5708S_UP1, val);
1835 }
1836
1837 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001838 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1839 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001840 /* increase tx signal amplitude */
1841 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1842 BCM5708S_BLK_ADDR_TX_MISC);
1843 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1844 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1845 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1846 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1847 }
1848
Michael Chane3648b32005-11-04 08:51:21 -08001849 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001850 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1851
1852 if (val) {
1853 u32 is_backplane;
1854
Michael Chane3648b32005-11-04 08:51:21 -08001855 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001856 BNX2_SHARED_HW_CFG_CONFIG);
1857 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1858 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1859 BCM5708S_BLK_ADDR_TX_MISC);
1860 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1861 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1862 BCM5708S_BLK_ADDR_DIG);
1863 }
1864 }
1865 return 0;
1866}
1867
1868static int
1869bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001870{
Michael Chan27a005b2007-05-03 13:23:41 -07001871 bnx2_reset_phy(bp);
1872
Michael Chanb6016b72005-05-26 13:03:09 -07001873 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1874
Michael Chan59b47d82006-11-19 14:10:45 -08001875 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1876 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001877
1878 if (bp->dev->mtu > 1500) {
1879 u32 val;
1880
1881 /* Set extended packet length bit */
1882 bnx2_write_phy(bp, 0x18, 0x7);
1883 bnx2_read_phy(bp, 0x18, &val);
1884 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1885
1886 bnx2_write_phy(bp, 0x1c, 0x6c00);
1887 bnx2_read_phy(bp, 0x1c, &val);
1888 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1889 }
1890 else {
1891 u32 val;
1892
1893 bnx2_write_phy(bp, 0x18, 0x7);
1894 bnx2_read_phy(bp, 0x18, &val);
1895 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1896
1897 bnx2_write_phy(bp, 0x1c, 0x6c00);
1898 bnx2_read_phy(bp, 0x1c, &val);
1899 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1900 }
1901
1902 return 0;
1903}
1904
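/* Initialize the copper PHY: apply the workarounds selected by
 * PHY_CRC_FIX_FLAG and PHY_DIS_EARLY_DAC_FLAG, set the extended packet
 * length bits according to the MTU, and enable ethernet@wirespeed.
 */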
1905static int
1906bnx2_init_copper_phy(struct bnx2 *bp)
1907{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001908 u32 val;
1909
Michael Chan27a005b2007-05-03 13:23:41 -07001910 bnx2_reset_phy(bp);
1911
Michael Chanb6016b72005-05-26 13:03:09 -07001912 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1913 bnx2_write_phy(bp, 0x18, 0x0c00);
1914 bnx2_write_phy(bp, 0x17, 0x000a);
1915 bnx2_write_phy(bp, 0x15, 0x310b);
1916 bnx2_write_phy(bp, 0x17, 0x201f);
1917 bnx2_write_phy(bp, 0x15, 0x9506);
1918 bnx2_write_phy(bp, 0x17, 0x401f);
1919 bnx2_write_phy(bp, 0x15, 0x14e2);
1920 bnx2_write_phy(bp, 0x18, 0x0400);
1921 }
1922
Michael Chanb659f442007-02-02 00:46:35 -08001923 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1924 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1925 MII_BNX2_DSP_EXPAND_REG | 0x8);
1926 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1927 val &= ~(1 << 8);
1928 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1929 }
1930
Michael Chanb6016b72005-05-26 13:03:09 -07001931 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001932 /* Set extended packet length bit */
1933 bnx2_write_phy(bp, 0x18, 0x7);
1934 bnx2_read_phy(bp, 0x18, &val);
1935 bnx2_write_phy(bp, 0x18, val | 0x4000);
1936
1937 bnx2_read_phy(bp, 0x10, &val);
1938 bnx2_write_phy(bp, 0x10, val | 0x1);
1939 }
1940 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001941 bnx2_write_phy(bp, 0x18, 0x7);
1942 bnx2_read_phy(bp, 0x18, &val);
1943 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1944
1945 bnx2_read_phy(bp, 0x10, &val);
1946 bnx2_write_phy(bp, 0x10, val & ~0x1);
1947 }
1948
Michael Chan5b0c76a2005-11-04 08:45:49 -08001949 /* ethernet@wirespeed */
1950 bnx2_write_phy(bp, 0x18, 0x7007);
1951 bnx2_read_phy(bp, 0x18, &val);
1952 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001953 return 0;
1954}
1955
1956
1957static int
1958bnx2_init_phy(struct bnx2 *bp)
1959{
1960 u32 val;
1961 int rc = 0;
1962
1963 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
Michael Chanca58c3a2007-05-03 13:22:52 -07001966 bp->mii_bmcr = MII_BMCR;
1967 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001968 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001969 bp->mii_adv = MII_ADVERTISE;
1970 bp->mii_lpa = MII_LPA;
1971
Michael Chanb6016b72005-05-26 13:03:09 -07001972 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
Michael Chan0d8a6572007-07-07 22:49:43 -07001974 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975 goto setup_phy;
1976
Michael Chanb6016b72005-05-26 13:03:09 -07001977 bnx2_read_phy(bp, MII_PHYSID1, &val);
1978 bp->phy_id = val << 16;
1979 bnx2_read_phy(bp, MII_PHYSID2, &val);
1980 bp->phy_id |= val & 0xffff;
1981
1982 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001983 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984 rc = bnx2_init_5706s_phy(bp);
1985 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001987 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001989 }
1990 else {
1991 rc = bnx2_init_copper_phy(bp);
1992 }
1993
Michael Chan0d8a6572007-07-07 22:49:43 -07001994setup_phy:
1995 if (!rc)
1996 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001997
1998 return rc;
1999}
2000
2001static int
2002bnx2_set_mac_loopback(struct bnx2 *bp)
2003{
2004 u32 mac_mode;
2005
2006 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bp->link_up = 1;
2011 return 0;
2012}
2013
Michael Chanbc5a0692006-01-23 16:13:22 -08002014static int bnx2_test_link(struct bnx2 *);
2015
2016static int
2017bnx2_set_phy_loopback(struct bnx2 *bp)
2018{
2019 u32 mac_mode;
2020 int rc, i;
2021
2022 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07002023 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08002024 BMCR_SPEED1000);
2025 spin_unlock_bh(&bp->phy_lock);
2026 if (rc)
2027 return rc;
2028
2029 for (i = 0; i < 10; i++) {
2030 if (bnx2_test_link(bp) == 0)
2031 break;
Michael Chan80be4432006-11-19 14:07:28 -08002032 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08002033 }
2034
2035 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08002038 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08002039
2040 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042 bp->link_up = 1;
2043 return 0;
2044}
2045
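/* Post a message to the bootcode through the BNX2_DRV_MB mailbox and poll
 * BNX2_FW_MB for an ACK with the matching sequence number.  On timeout the
 * firmware is notified with BNX2_DRV_MSG_CODE_FW_TIMEOUT and -EBUSY is
 * returned.
 */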
Michael Chanb6016b72005-05-26 13:03:09 -07002046static int
Michael Chanb090ae22006-01-23 16:07:10 -08002047bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07002048{
2049 int i;
2050 u32 val;
2051
Michael Chanb6016b72005-05-26 13:03:09 -07002052 bp->fw_wr_seq++;
2053 msg_data |= bp->fw_wr_seq;
2054
Michael Chane3648b32005-11-04 08:51:21 -08002055 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002056
2057 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08002058 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2059 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07002060
Michael Chane3648b32005-11-04 08:51:21 -08002061 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07002062
2063 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2064 break;
2065 }
Michael Chanb090ae22006-01-23 16:07:10 -08002066 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2067 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002068
2069 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08002070 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2071 if (!silent)
2072 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2073 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002074
2075 msg_data &= ~BNX2_DRV_MSG_CODE;
2076 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2077
Michael Chane3648b32005-11-04 08:51:21 -08002078 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002079
Michael Chanb6016b72005-05-26 13:03:09 -07002080 return -EBUSY;
2081 }
2082
Michael Chanb090ae22006-01-23 16:07:10 -08002083 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2084 return -EIO;
2085
Michael Chanb6016b72005-05-26 13:03:09 -07002086 return 0;
2087}
2088
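/* The 5709 keeps context memory in host pages: start the context memory
 * init, then program each host page address into the page table, polling
 * for the write request bit to clear after each entry.
 */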
Michael Chan59b47d82006-11-19 14:10:45 -08002089static int
2090bnx2_init_5709_context(struct bnx2 *bp)
2091{
2092 int i, ret = 0;
2093 u32 val;
2094
2095 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2096 val |= (BCM_PAGE_BITS - 8) << 16;
2097 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002098 for (i = 0; i < 10; i++) {
2099 val = REG_RD(bp, BNX2_CTX_COMMAND);
2100 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2101 break;
2102 udelay(2);
2103 }
2104 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2105 return -EBUSY;
2106
Michael Chan59b47d82006-11-19 14:10:45 -08002107 for (i = 0; i < bp->ctx_pages; i++) {
2108 int j;
2109
2110 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2111 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2112 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2113 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2114 (u64) bp->ctx_blk_mapping[i] >> 32);
2115 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2116 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2117 for (j = 0; j < 10; j++) {
2118
2119 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2120 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2121 break;
2122 udelay(5);
2123 }
2124 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2125 ret = -EBUSY;
2126 break;
2127 }
2128 }
2129 return ret;
2130}
2131
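/* Zero the on-chip context memory for all 96 CIDs.  On 5706 A0, certain
 * VCIDs are remapped to different physical CID addresses.
 */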
Michael Chanb6016b72005-05-26 13:03:09 -07002132static void
2133bnx2_init_context(struct bnx2 *bp)
2134{
2135 u32 vcid;
2136
2137 vcid = 96;
2138 while (vcid) {
2139 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002140 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002141
2142 vcid--;
2143
2144 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2145 u32 new_vcid;
2146
2147 vcid_addr = GET_PCID_ADDR(vcid);
2148 if (vcid & 0x8) {
2149 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2150 }
2151 else {
2152 new_vcid = vcid;
2153 }
2154 pcid_addr = GET_PCID_ADDR(new_vcid);
2155 }
2156 else {
2157 vcid_addr = GET_CID_ADDR(vcid);
2158 pcid_addr = vcid_addr;
2159 }
2160
Michael Chan7947b202007-06-04 21:17:10 -07002161 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2162 vcid_addr += (i << PHY_CTX_SHIFT);
2163 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002164
Michael Chan5d5d0012007-12-12 11:17:43 -08002165 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
Michael Chan7947b202007-06-04 21:17:10 -07002166 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2167
2168 /* Zero out the context. */
2169 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
Michael Chan5d5d0012007-12-12 11:17:43 -08002170 CTX_WR(bp, vcid_addr, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002171 }
Michael Chanb6016b72005-05-26 13:03:09 -07002172 }
2173}
2174
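/* Work around bad RX mbuf memory: allocate every mbuf from the chip, record
 * the good ones (bit 9 clear), and free only those back so the bad blocks
 * stay permanently allocated and unused.
 */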
2175static int
2176bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2177{
2178 u16 *good_mbuf;
2179 u32 good_mbuf_cnt;
2180 u32 val;
2181
2182 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2183 if (good_mbuf == NULL) {
2184 printk(KERN_ERR PFX "Failed to allocate memory in "
2185 "bnx2_alloc_bad_rbuf\n");
2186 return -ENOMEM;
2187 }
2188
2189 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2190 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2191
2192 good_mbuf_cnt = 0;
2193
2194 /* Allocate a bunch of mbufs and save the good ones in an array. */
2195 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2196 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2197 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2198
2199 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2200
2201 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2202
2203 /* The addresses with Bit 9 set are bad memory blocks. */
2204 if (!(val & (1 << 9))) {
2205 good_mbuf[good_mbuf_cnt] = (u16) val;
2206 good_mbuf_cnt++;
2207 }
2208
2209 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2210 }
2211
2212 /* Free the good ones back to the mbuf pool thus discarding
2213 * all the bad ones. */
2214 while (good_mbuf_cnt) {
2215 good_mbuf_cnt--;
2216
2217 val = good_mbuf[good_mbuf_cnt];
2218 val = (val << 9) | val | 1;
2219
2220 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2221 }
2222 kfree(good_mbuf);
2223 return 0;
2224}
2225
2226static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002227bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002228{
2229 u32 val;
2230 u8 *mac_addr = bp->dev->dev_addr;
2231
2232 val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002236 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002237 (mac_addr[4] << 8) | mac_addr[5];
2238
2239 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240}
2241
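/* Attach a newly allocated page to the given RX page ring slot and fill in
 * the matching rx_bd with its DMA address.
 */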
2242static inline int
Michael Chan47bf4242007-12-12 11:19:12 -08002243bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2244{
2245 dma_addr_t mapping;
2246 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2247 struct rx_bd *rxbd =
2248 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2249 struct page *page = alloc_page(GFP_ATOMIC);
2250
2251 if (!page)
2252 return -ENOMEM;
2253 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2254 PCI_DMA_FROMDEVICE);
2255 rx_pg->page = page;
2256 pci_unmap_addr_set(rx_pg, mapping, mapping);
2257 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2258 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2259 return 0;
2260}
2261
2262static void
2263bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264{
2265 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266 struct page *page = rx_pg->page;
2267
2268 if (!page)
2269 return;
2270
2271 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272 PCI_DMA_FROMDEVICE);
2273
2274 __free_page(page);
2275 rx_pg->page = NULL;
2276}
2277
2278static inline int
Michael Chanb6016b72005-05-26 13:03:09 -07002279bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2280{
2281 struct sk_buff *skb;
2282 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2283 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002284 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002285 unsigned long align;
2286
Michael Chan932f3772006-08-15 01:39:36 -07002287 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002288 if (skb == NULL) {
2289 return -ENOMEM;
2290 }
2291
Michael Chan59b47d82006-11-19 14:10:45 -08002292 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2293 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002294
Michael Chanb6016b72005-05-26 13:03:09 -07002295 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2296 PCI_DMA_FROMDEVICE);
2297
2298 rx_buf->skb = skb;
2299 pci_unmap_addr_set(rx_buf, mapping, mapping);
2300
2301 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2302 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2303
2304 bp->rx_prod_bseq += bp->rx_buf_use_size;
2305
2306 return 0;
2307}
2308
Michael Chanda3e4fb2007-05-03 13:24:23 -07002309static int
Michael Chan35efa7c2007-12-20 19:56:37 -08002310bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002311{
Michael Chan35efa7c2007-12-20 19:56:37 -08002312 struct status_block *sblk = bnapi->status_blk;
Michael Chanda3e4fb2007-05-03 13:24:23 -07002313 u32 new_link_state, old_link_state;
2314 int is_set = 1;
2315
2316 new_link_state = sblk->status_attn_bits & event;
2317 old_link_state = sblk->status_attn_bits_ack & event;
2318 if (new_link_state != old_link_state) {
2319 if (new_link_state)
2320 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321 else
2322 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323 } else
2324 is_set = 0;
2325
2326 return is_set;
2327}
2328
Michael Chanb6016b72005-05-26 13:03:09 -07002329static void
Michael Chan35efa7c2007-12-20 19:56:37 -08002330bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
Michael Chanb6016b72005-05-26 13:03:09 -07002331{
Michael Chan35efa7c2007-12-20 19:56:37 -08002332 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
Michael Chanda3e4fb2007-05-03 13:24:23 -07002333 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002334 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002335 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002336 }
Michael Chan35efa7c2007-12-20 19:56:37 -08002337 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
Michael Chan0d8a6572007-07-07 22:49:43 -07002338 bnx2_set_remote_link(bp);
2339
Michael Chanb6016b72005-05-26 13:03:09 -07002340}
2341
Michael Chanead72702007-12-20 19:55:39 -08002342static inline u16
Michael Chan35efa7c2007-12-20 19:56:37 -08002343bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
Michael Chanead72702007-12-20 19:55:39 -08002344{
2345 u16 cons;
2346
Michael Chan35efa7c2007-12-20 19:56:37 -08002347 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
Michael Chanead72702007-12-20 19:55:39 -08002348
2349 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350 cons++;
2351 return cons;
2352}
2353
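/* Reclaim TX descriptors up to the hardware consumer index, unmapping and
 * freeing the completed skbs, and wake the queue once enough BDs are free.
 */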
Michael Chanb6016b72005-05-26 13:03:09 -07002354static void
Michael Chan35efa7c2007-12-20 19:56:37 -08002355bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
Michael Chanb6016b72005-05-26 13:03:09 -07002356{
2357 u16 hw_cons, sw_cons, sw_ring_cons;
2358 int tx_free_bd = 0;
2359
Michael Chan35efa7c2007-12-20 19:56:37 -08002360 hw_cons = bnx2_get_hw_tx_cons(bnapi);
Michael Chana550c992007-12-20 19:56:59 -08002361 sw_cons = bnapi->tx_cons;
Michael Chanb6016b72005-05-26 13:03:09 -07002362
2363 while (sw_cons != hw_cons) {
2364 struct sw_bd *tx_buf;
2365 struct sk_buff *skb;
2366 int i, last;
2367
2368 sw_ring_cons = TX_RING_IDX(sw_cons);
2369
2370 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2371 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002372
Michael Chanb6016b72005-05-26 13:03:09 -07002373 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002374 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002375 u16 last_idx, last_ring_idx;
2376
2377 last_idx = sw_cons +
2378 skb_shinfo(skb)->nr_frags + 1;
2379 last_ring_idx = sw_ring_cons +
2380 skb_shinfo(skb)->nr_frags + 1;
2381 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2382 last_idx++;
2383 }
2384 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2385 break;
2386 }
2387 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002388
Michael Chanb6016b72005-05-26 13:03:09 -07002389 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2390 skb_headlen(skb), PCI_DMA_TODEVICE);
2391
2392 tx_buf->skb = NULL;
2393 last = skb_shinfo(skb)->nr_frags;
2394
2395 for (i = 0; i < last; i++) {
2396 sw_cons = NEXT_TX_BD(sw_cons);
2397
2398 pci_unmap_page(bp->pdev,
2399 pci_unmap_addr(
2400 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2401 mapping),
2402 skb_shinfo(skb)->frags[i].size,
2403 PCI_DMA_TODEVICE);
2404 }
2405
2406 sw_cons = NEXT_TX_BD(sw_cons);
2407
2408 tx_free_bd += last + 1;
2409
Michael Chan745720e2006-06-29 12:37:41 -07002410 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002411
Michael Chan35efa7c2007-12-20 19:56:37 -08002412 hw_cons = bnx2_get_hw_tx_cons(bnapi);
Michael Chanb6016b72005-05-26 13:03:09 -07002413 }
2414
Michael Chana550c992007-12-20 19:56:59 -08002415 bnapi->hw_tx_cons = hw_cons;
2416 bnapi->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002417 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2418 * before checking for netif_queue_stopped(). Without the
2419 * memory barrier, there is a small possibility that bnx2_start_xmit()
2420 * will miss it and cause the queue to be stopped forever.
2421 */
2422 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002423
Michael Chan2f8af122006-08-15 01:39:10 -07002424 if (unlikely(netif_queue_stopped(bp->dev)) &&
Michael Chana550c992007-12-20 19:56:59 -08002425 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
Michael Chan2f8af122006-08-15 01:39:10 -07002426 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002427 if ((netif_queue_stopped(bp->dev)) &&
Michael Chana550c992007-12-20 19:56:59 -08002428 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002429 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002430 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002431 }
Michael Chanb6016b72005-05-26 13:03:09 -07002432}
2433
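/* Recycle 'count' RX page ring entries from the consumer back to the
 * producer side.  If an skb is supplied, its last page fragment is returned
 * to the ring and the skb is freed.
 */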
Michael Chan1db82f22007-12-12 11:19:35 -08002434static void
2435bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
2436{
2437 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2438 struct rx_bd *cons_bd, *prod_bd;
2439 dma_addr_t mapping;
2440 int i;
2441 u16 hw_prod = bp->rx_pg_prod, prod;
2442 u16 cons = bp->rx_pg_cons;
2443
2444 for (i = 0; i < count; i++) {
2445 prod = RX_PG_RING_IDX(hw_prod);
2446
2447 prod_rx_pg = &bp->rx_pg_ring[prod];
2448 cons_rx_pg = &bp->rx_pg_ring[cons];
2449 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2450 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2451
2452 if (i == 0 && skb) {
2453 struct page *page;
2454 struct skb_shared_info *shinfo;
2455
2456 shinfo = skb_shinfo(skb);
2457 shinfo->nr_frags--;
2458 page = shinfo->frags[shinfo->nr_frags].page;
2459 shinfo->frags[shinfo->nr_frags].page = NULL;
2460 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2461 PCI_DMA_FROMDEVICE);
2462 cons_rx_pg->page = page;
2463 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2464 dev_kfree_skb(skb);
2465 }
2466 if (prod != cons) {
2467 prod_rx_pg->page = cons_rx_pg->page;
2468 cons_rx_pg->page = NULL;
2469 pci_unmap_addr_set(prod_rx_pg, mapping,
2470 pci_unmap_addr(cons_rx_pg, mapping));
2471
2472 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2473 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2474
2475 }
2476 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2477 hw_prod = NEXT_RX_BD(hw_prod);
2478 }
2479 bp->rx_pg_prod = hw_prod;
2480 bp->rx_pg_cons = cons;
2481}
2482
Michael Chanb6016b72005-05-26 13:03:09 -07002483static inline void
2484bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2485 u16 cons, u16 prod)
2486{
Michael Chan236b6392006-03-20 17:49:02 -08002487 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2488 struct rx_bd *cons_bd, *prod_bd;
2489
2490 cons_rx_buf = &bp->rx_buf_ring[cons];
2491 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002492
2493 pci_dma_sync_single_for_device(bp->pdev,
2494 pci_unmap_addr(cons_rx_buf, mapping),
2495 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2496
Michael Chan236b6392006-03-20 17:49:02 -08002497 bp->rx_prod_bseq += bp->rx_buf_use_size;
2498
2499 prod_rx_buf->skb = skb;
2500
2501 if (cons == prod)
2502 return;
2503
Michael Chanb6016b72005-05-26 13:03:09 -07002504 pci_unmap_addr_set(prod_rx_buf, mapping,
2505 pci_unmap_addr(cons_rx_buf, mapping));
2506
Michael Chan3fdfcc22006-03-20 17:49:49 -08002507 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2508 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002509 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2510 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002511}
2512
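/* Finish a received packet: replenish the ring slot first, then either keep
 * the whole frame linear (no header split) or keep hdr_len bytes linear and
 * attach the remainder as page fragments from the RX page ring.
 */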
Michael Chan85833c62007-12-12 11:17:01 -08002513static int
2514bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
Michael Chan1db82f22007-12-12 11:19:35 -08002515 unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
Michael Chan85833c62007-12-12 11:17:01 -08002516{
2517 int err;
2518 u16 prod = ring_idx & 0xffff;
2519
2520 err = bnx2_alloc_rx_skb(bp, prod);
2521 if (unlikely(err)) {
2522 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
Michael Chan1db82f22007-12-12 11:19:35 -08002523 if (hdr_len) {
2524 unsigned int raw_len = len + 4;
2525 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2526
2527 bnx2_reuse_rx_skb_pages(bp, NULL, pages);
2528 }
Michael Chan85833c62007-12-12 11:17:01 -08002529 return err;
2530 }
2531
2532 skb_reserve(skb, bp->rx_offset);
2533 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2534 PCI_DMA_FROMDEVICE);
2535
Michael Chan1db82f22007-12-12 11:19:35 -08002536 if (hdr_len == 0) {
2537 skb_put(skb, len);
2538 return 0;
2539 } else {
2540 unsigned int i, frag_len, frag_size, pages;
2541 struct sw_pg *rx_pg;
2542 u16 pg_cons = bp->rx_pg_cons;
2543 u16 pg_prod = bp->rx_pg_prod;
2544
2545 frag_size = len + 4 - hdr_len;
2546 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2547 skb_put(skb, hdr_len);
2548
2549 for (i = 0; i < pages; i++) {
2550 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2551 if (unlikely(frag_len <= 4)) {
2552 unsigned int tail = 4 - frag_len;
2553
2554 bp->rx_pg_cons = pg_cons;
2555 bp->rx_pg_prod = pg_prod;
2556 bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
2557 skb->len -= tail;
2558 if (i == 0) {
2559 skb->tail -= tail;
2560 } else {
2561 skb_frag_t *frag =
2562 &skb_shinfo(skb)->frags[i - 1];
2563 frag->size -= tail;
2564 skb->data_len -= tail;
2565 skb->truesize -= tail;
2566 }
2567 return 0;
2568 }
2569 rx_pg = &bp->rx_pg_ring[pg_cons];
2570
2571 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2572 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2573
2574 if (i == pages - 1)
2575 frag_len -= 4;
2576
2577 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2578 rx_pg->page = NULL;
2579
2580 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2581 if (unlikely(err)) {
2582 bp->rx_pg_cons = pg_cons;
2583 bp->rx_pg_prod = pg_prod;
2584 bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
2585 return err;
2586 }
2587
2588 frag_size -= frag_len;
2589 skb->data_len += frag_len;
2590 skb->truesize += frag_len;
2591 skb->len += frag_len;
2592
2593 pg_prod = NEXT_RX_BD(pg_prod);
2594 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2595 }
2596 bp->rx_pg_prod = pg_prod;
2597 bp->rx_pg_cons = pg_cons;
2598 }
Michael Chan85833c62007-12-12 11:17:01 -08002599 return 0;
2600}
2601
Michael Chanc09c2622007-12-10 17:18:37 -08002602static inline u16
Michael Chan35efa7c2007-12-20 19:56:37 -08002603bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
Michael Chanc09c2622007-12-10 17:18:37 -08002604{
Michael Chan35efa7c2007-12-20 19:56:37 -08002605 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
Michael Chanc09c2622007-12-10 17:18:37 -08002606
2607 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2608 cons++;
2609 return cons;
2610}
2611
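/* Main RX path: walk the ring up to the hardware consumer index, drop frames
 * with l2_fhdr errors, copy small frames into fresh skbs, build paged skbs
 * for split/jumbo frames, and hand the packets to the stack.
 */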
Michael Chanb6016b72005-05-26 13:03:09 -07002612static int
Michael Chan35efa7c2007-12-20 19:56:37 -08002613bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
Michael Chanb6016b72005-05-26 13:03:09 -07002614{
2615 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2616 struct l2_fhdr *rx_hdr;
Michael Chan1db82f22007-12-12 11:19:35 -08002617 int rx_pkt = 0, pg_ring_used = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002618
Michael Chan35efa7c2007-12-20 19:56:37 -08002619 hw_cons = bnx2_get_hw_rx_cons(bnapi);
Michael Chanb6016b72005-05-26 13:03:09 -07002620 sw_cons = bp->rx_cons;
2621 sw_prod = bp->rx_prod;
2622
2623 /* Memory barrier necessary as speculative reads of the rx
2624 * buffer can be ahead of the index in the status block
2625 */
2626 rmb();
2627 while (sw_cons != hw_cons) {
Michael Chan1db82f22007-12-12 11:19:35 -08002628 unsigned int len, hdr_len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002629 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002630 struct sw_bd *rx_buf;
2631 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002632 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002633
2634 sw_ring_cons = RX_RING_IDX(sw_cons);
2635 sw_ring_prod = RX_RING_IDX(sw_prod);
2636
2637 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2638 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002639
2640 rx_buf->skb = NULL;
2641
2642 dma_addr = pci_unmap_addr(rx_buf, mapping);
2643
2644 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002645 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2646
2647 rx_hdr = (struct l2_fhdr *) skb->data;
Michael Chan1db82f22007-12-12 11:19:35 -08002648 len = rx_hdr->l2_fhdr_pkt_len;
Michael Chanb6016b72005-05-26 13:03:09 -07002649
Michael Chanade2bfe2006-01-23 16:09:51 -08002650 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002651 (L2_FHDR_ERRORS_BAD_CRC |
2652 L2_FHDR_ERRORS_PHY_DECODE |
2653 L2_FHDR_ERRORS_ALIGNMENT |
2654 L2_FHDR_ERRORS_TOO_SHORT |
2655 L2_FHDR_ERRORS_GIANT_FRAME)) {
2656
Michael Chan85833c62007-12-12 11:17:01 -08002657 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2658 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002659 }
Michael Chan1db82f22007-12-12 11:19:35 -08002660 hdr_len = 0;
2661 if (status & L2_FHDR_STATUS_SPLIT) {
2662 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2663 pg_ring_used = 1;
2664 } else if (len > bp->rx_jumbo_thresh) {
2665 hdr_len = bp->rx_jumbo_thresh;
2666 pg_ring_used = 1;
2667 }
2668
2669 len -= 4;
Michael Chanb6016b72005-05-26 13:03:09 -07002670
Michael Chan5d5d0012007-12-12 11:17:43 -08002671 if (len <= bp->rx_copy_thresh) {
Michael Chanb6016b72005-05-26 13:03:09 -07002672 struct sk_buff *new_skb;
2673
Michael Chan932f3772006-08-15 01:39:36 -07002674 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chan85833c62007-12-12 11:17:01 -08002675 if (new_skb == NULL) {
2676 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2677 sw_ring_prod);
2678 goto next_rx;
2679 }
Michael Chanb6016b72005-05-26 13:03:09 -07002680
2681 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002682 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2683 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002684 skb_reserve(new_skb, 2);
2685 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002686
2687 bnx2_reuse_rx_skb(bp, skb,
2688 sw_ring_cons, sw_ring_prod);
2689
2690 skb = new_skb;
Michael Chan1db82f22007-12-12 11:19:35 -08002691 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
Michael Chan85833c62007-12-12 11:17:01 -08002692 (sw_ring_cons << 16) | sw_ring_prod)))
Michael Chanb6016b72005-05-26 13:03:09 -07002693 goto next_rx;
Michael Chanb6016b72005-05-26 13:03:09 -07002694
2695 skb->protocol = eth_type_trans(skb, bp->dev);
2696
2697 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002698 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002699
Michael Chan745720e2006-06-29 12:37:41 -07002700 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002701 goto next_rx;
2702
2703 }
2704
Michael Chanb6016b72005-05-26 13:03:09 -07002705 skb->ip_summed = CHECKSUM_NONE;
2706 if (bp->rx_csum &&
2707 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2708 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2709
Michael Chanade2bfe2006-01-23 16:09:51 -08002710 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2711 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002712 skb->ip_summed = CHECKSUM_UNNECESSARY;
2713 }
2714
2715#ifdef BCM_VLAN
2716 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2717 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2718 rx_hdr->l2_fhdr_vlan_tag);
2719 }
2720 else
2721#endif
2722 netif_receive_skb(skb);
2723
2724 bp->dev->last_rx = jiffies;
2725 rx_pkt++;
2726
2727next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002728 sw_cons = NEXT_RX_BD(sw_cons);
2729 sw_prod = NEXT_RX_BD(sw_prod);
2730
 2731 if (rx_pkt == budget)
2732 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002733
2734 /* Refresh hw_cons to see if there is new work */
2735 if (sw_cons == hw_cons) {
Michael Chan35efa7c2007-12-20 19:56:37 -08002736 hw_cons = bnx2_get_hw_rx_cons(bnapi);
Michael Chanf4e418f2005-11-04 08:53:48 -08002737 rmb();
2738 }
Michael Chanb6016b72005-05-26 13:03:09 -07002739 }
2740 bp->rx_cons = sw_cons;
2741 bp->rx_prod = sw_prod;
2742
Michael Chan1db82f22007-12-12 11:19:35 -08002743 if (pg_ring_used)
2744 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2745 bp->rx_pg_prod);
2746
Michael Chanb6016b72005-05-26 13:03:09 -07002747 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2748
2749 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2750
2751 mmiowb();
2752
2753 return rx_pkt;
2754
2755}
2756
2757/* MSI ISR - The only difference between this and the INTx ISR
2758 * is that the MSI interrupt is always serviced.
2759 */
2760static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002761bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002762{
2763 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002764 struct bnx2 *bp = netdev_priv(dev);
Michael Chan35efa7c2007-12-20 19:56:37 -08002765 struct bnx2_napi *bnapi = &bp->bnx2_napi;
Michael Chanb6016b72005-05-26 13:03:09 -07002766
Michael Chan35efa7c2007-12-20 19:56:37 -08002767 prefetch(bnapi->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002768 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2769 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2770 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2771
2772 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002773 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2774 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002775
Michael Chan35efa7c2007-12-20 19:56:37 -08002776 netif_rx_schedule(dev, &bnapi->napi);
Michael Chanb6016b72005-05-26 13:03:09 -07002777
Michael Chan73eef4c2005-08-25 15:39:15 -07002778 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002779}
2780
2781static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002782bnx2_msi_1shot(int irq, void *dev_instance)
2783{
2784 struct net_device *dev = dev_instance;
2785 struct bnx2 *bp = netdev_priv(dev);
Michael Chan35efa7c2007-12-20 19:56:37 -08002786 struct bnx2_napi *bnapi = &bp->bnx2_napi;
Michael Chan8e6a72c2007-05-03 13:24:48 -07002787
Michael Chan35efa7c2007-12-20 19:56:37 -08002788 prefetch(bnapi->status_blk);
Michael Chan8e6a72c2007-05-03 13:24:48 -07002789
2790 /* Return here if interrupt is disabled. */
2791 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2792 return IRQ_HANDLED;
2793
Michael Chan35efa7c2007-12-20 19:56:37 -08002794 netif_rx_schedule(dev, &bnapi->napi);
Michael Chan8e6a72c2007-05-03 13:24:48 -07002795
2796 return IRQ_HANDLED;
2797}
2798
2799static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002800bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002801{
2802 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002803 struct bnx2 *bp = netdev_priv(dev);
Michael Chan35efa7c2007-12-20 19:56:37 -08002804 struct bnx2_napi *bnapi = &bp->bnx2_napi;
2805 struct status_block *sblk = bnapi->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002806
2807 /* When using INTx, it is possible for the interrupt to arrive
2808 * at the CPU before the status block posted prior to the
2809 * interrupt. Reading a register will flush the status block.
2810 * When using MSI, the MSI message will always complete after
2811 * the status block write.
2812 */
Michael Chan35efa7c2007-12-20 19:56:37 -08002813 if ((sblk->status_idx == bnapi->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002814 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2815 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002816 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002817
2818 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2819 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2820 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2821
Michael Chanb8a7ce72007-07-07 22:51:03 -07002822 /* Read back to deassert IRQ immediately to avoid too many
2823 * spurious interrupts.
2824 */
2825 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2826
Michael Chanb6016b72005-05-26 13:03:09 -07002827 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002828 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2829 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002830
Michael Chan35efa7c2007-12-20 19:56:37 -08002831 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2832 bnapi->last_status_idx = sblk->status_idx;
2833 __netif_rx_schedule(dev, &bnapi->napi);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002834 }
Michael Chanb6016b72005-05-26 13:03:09 -07002835
Michael Chan73eef4c2005-08-25 15:39:15 -07002836 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002837}
2838
Michael Chan0d8a6572007-07-07 22:49:43 -07002839#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2840 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002841
Michael Chanf4e418f2005-11-04 08:53:48 -08002842static inline int
Michael Chan35efa7c2007-12-20 19:56:37 -08002843bnx2_has_work(struct bnx2_napi *bnapi)
Michael Chanf4e418f2005-11-04 08:53:48 -08002844{
Michael Chan35efa7c2007-12-20 19:56:37 -08002845 struct bnx2 *bp = bnapi->bp;
Michael Chanf4e418f2005-11-04 08:53:48 -08002846 struct status_block *sblk = bp->status_blk;
2847
Michael Chan35efa7c2007-12-20 19:56:37 -08002848 if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
Michael Chana550c992007-12-20 19:56:59 -08002849 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
Michael Chanf4e418f2005-11-04 08:53:48 -08002850 return 1;
2851
Michael Chanda3e4fb2007-05-03 13:24:23 -07002852 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2853 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002854 return 1;
2855
2856 return 0;
2857}
2858
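/* NAPI worker: handle PHY/attention events, reap TX completions and process
 * RX packets within the remaining budget.
 */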
Michael Chan35efa7c2007-12-20 19:56:37 -08002859static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2860 int work_done, int budget)
Michael Chanb6016b72005-05-26 13:03:09 -07002861{
Michael Chan35efa7c2007-12-20 19:56:37 -08002862 struct status_block *sblk = bnapi->status_blk;
Michael Chanda3e4fb2007-05-03 13:24:23 -07002863 u32 status_attn_bits = sblk->status_attn_bits;
2864 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002865
Michael Chanda3e4fb2007-05-03 13:24:23 -07002866 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2867 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002868
Michael Chan35efa7c2007-12-20 19:56:37 -08002869 bnx2_phy_int(bp, bnapi);
Michael Chanbf5295b2006-03-23 01:11:56 -08002870
2871 /* This is needed to take care of transient status
2872 * during link changes.
2873 */
2874 REG_WR(bp, BNX2_HC_COMMAND,
2875 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2876 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002877 }
2878
Michael Chana550c992007-12-20 19:56:59 -08002879 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
Michael Chan35efa7c2007-12-20 19:56:37 -08002880 bnx2_tx_int(bp, bnapi);
Michael Chanb6016b72005-05-26 13:03:09 -07002881
Michael Chan35efa7c2007-12-20 19:56:37 -08002882 if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
2883 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002884
David S. Miller6f535762007-10-11 18:08:29 -07002885 return work_done;
2886}
Michael Chanf4e418f2005-11-04 08:53:48 -08002887
David S. Miller6f535762007-10-11 18:08:29 -07002888static int bnx2_poll(struct napi_struct *napi, int budget)
2889{
Michael Chan35efa7c2007-12-20 19:56:37 -08002890 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2891 struct bnx2 *bp = bnapi->bp;
David S. Miller6f535762007-10-11 18:08:29 -07002892 int work_done = 0;
Michael Chan35efa7c2007-12-20 19:56:37 -08002893 struct status_block *sblk = bnapi->status_blk;
David S. Miller6f535762007-10-11 18:08:29 -07002894
2895 while (1) {
Michael Chan35efa7c2007-12-20 19:56:37 -08002896 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
David S. Miller6f535762007-10-11 18:08:29 -07002897
2898 if (unlikely(work_done >= budget))
2899 break;
2900
Michael Chan35efa7c2007-12-20 19:56:37 -08002901 /* bnapi->last_status_idx is used below to tell the hw how
Michael Chan6dee6422007-10-12 01:40:38 -07002902 * much work has been processed, so we must read it before
2903 * checking for more work.
2904 */
Michael Chan35efa7c2007-12-20 19:56:37 -08002905 bnapi->last_status_idx = sblk->status_idx;
Michael Chan6dee6422007-10-12 01:40:38 -07002906 rmb();
Michael Chan35efa7c2007-12-20 19:56:37 -08002907 if (likely(!bnx2_has_work(bnapi))) {
David S. Miller6f535762007-10-11 18:08:29 -07002908 netif_rx_complete(bp->dev, napi);
2909 if (likely(bp->flags & USING_MSI_FLAG)) {
2910 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2911 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
Michael Chan35efa7c2007-12-20 19:56:37 -08002912 bnapi->last_status_idx);
Michael Chan6dee6422007-10-12 01:40:38 -07002913 break;
David S. Miller6f535762007-10-11 18:08:29 -07002914 }
2915 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2916 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2917 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
Michael Chan35efa7c2007-12-20 19:56:37 -08002918 bnapi->last_status_idx);
David S. Miller6f535762007-10-11 18:08:29 -07002919
Michael Chan1269a8a2006-01-23 16:11:03 -08002920 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2921 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
Michael Chan35efa7c2007-12-20 19:56:37 -08002922 bnapi->last_status_idx);
David S. Miller6f535762007-10-11 18:08:29 -07002923 break;
Michael Chan1269a8a2006-01-23 16:11:03 -08002924 }
Michael Chanb6016b72005-05-26 13:03:09 -07002925 }
2926
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002927 return work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002928}
2929
Herbert Xu932ff272006-06-09 12:20:56 -07002930/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002931 * from set_multicast.
2932 */
2933static void
2934bnx2_set_rx_mode(struct net_device *dev)
2935{
Michael Chan972ec0d2006-01-23 16:12:43 -08002936 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002937 u32 rx_mode, sort_mode;
2938 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002939
Michael Chanc770a652005-08-25 15:38:39 -07002940 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002941
2942 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2943 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2944 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2945#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002946 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002947 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002948#else
Michael Chane29054f2006-01-23 16:06:06 -08002949 if (!(bp->flags & ASF_ENABLE_FLAG))
2950 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002951#endif
2952 if (dev->flags & IFF_PROMISC) {
2953 /* Promiscuous mode. */
2954 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002955 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2956 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002957 }
2958 else if (dev->flags & IFF_ALLMULTI) {
2959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2960 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2961 0xffffffff);
2962 }
2963 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2964 }
2965 else {
2966 /* Accept one or more multicast(s). */
2967 struct dev_mc_list *mclist;
2968 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2969 u32 regidx;
2970 u32 bit;
2971 u32 crc;
2972
2973 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2974
2975 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2976 i++, mclist = mclist->next) {
2977
2978 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2979 bit = crc & 0xff;
2980 regidx = (bit & 0xe0) >> 5;
2981 bit &= 0x1f;
2982 mc_filter[regidx] |= (1 << bit);
2983 }
2984
2985 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2986 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2987 mc_filter[i]);
2988 }
2989
2990 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2991 }
2992
2993 if (rx_mode != bp->rx_mode) {
2994 bp->rx_mode = rx_mode;
2995 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2996 }
2997
2998 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2999 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3000 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3001
Michael Chanc770a652005-08-25 15:38:39 -07003002 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003003}
3004
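/* Load RV2P microcode one 64-bit instruction at a time through the
 * INSTR_HIGH/INSTR_LOW registers, then put the processor in reset; it is
 * un-stalled later.
 */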
3005static void
3006load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3007 u32 rv2p_proc)
3008{
3009 int i;
3010 u32 val;
3011
3012
3013 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07003014 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07003015 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07003016 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07003017 rv2p_code++;
3018
3019 if (rv2p_proc == RV2P_PROC1) {
3020 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3021 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3022 }
3023 else {
3024 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3025 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3026 }
3027 }
3028
3029 /* Reset the processor, un-stall is done later. */
3030 if (rv2p_proc == RV2P_PROC1) {
3031 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3032 }
3033 else {
3034 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3035 }
3036}
3037
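/* Load firmware into one on-chip CPU: halt it, inflate and copy the text
 * section, copy the data and read-only sections, zero sbss/bss, set the
 * start address and release the CPU from halt.
 */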
Michael Chanaf3ee512006-11-19 14:09:25 -08003038static int
Michael Chanb6016b72005-05-26 13:03:09 -07003039load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3040{
3041 u32 offset;
3042 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08003043 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003044
3045 /* Halt the CPU. */
3046 val = REG_RD_IND(bp, cpu_reg->mode);
3047 val |= cpu_reg->mode_value_halt;
3048 REG_WR_IND(bp, cpu_reg->mode, val);
3049 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3050
3051 /* Load the Text area. */
3052 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08003053 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07003054 int j;
3055
Michael Chanea1f8d52007-10-02 16:27:35 -07003056 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3057 fw->gz_text_len);
3058 if (rc < 0)
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003059 return rc;
Michael Chanea1f8d52007-10-02 16:27:35 -07003060
Michael Chanb6016b72005-05-26 13:03:09 -07003061 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003062 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07003063 }
3064 }
3065
3066 /* Load the Data area. */
3067 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3068 if (fw->data) {
3069 int j;
3070
3071 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3072 REG_WR_IND(bp, offset, fw->data[j]);
3073 }
3074 }
3075
3076 /* Load the SBSS area. */
3077 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003078 if (fw->sbss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003079 int j;
3080
3081 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003082 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003083 }
3084 }
3085
3086 /* Load the BSS area. */
3087 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
Michael Chanea1f8d52007-10-02 16:27:35 -07003088 if (fw->bss_len) {
Michael Chanb6016b72005-05-26 13:03:09 -07003089 int j;
3090
3091 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
Michael Chanea1f8d52007-10-02 16:27:35 -07003092 REG_WR_IND(bp, offset, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003093 }
3094 }
3095
3096 /* Load the Read-Only area. */
3097 offset = cpu_reg->spad_base +
3098 (fw->rodata_addr - cpu_reg->mips_view_base);
3099 if (fw->rodata) {
3100 int j;
3101
3102 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3103 REG_WR_IND(bp, offset, fw->rodata[j]);
3104 }
3105 }
3106
3107 /* Clear the pre-fetch instruction. */
3108 REG_WR_IND(bp, cpu_reg->inst, 0);
3109 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3110
3111 /* Start the CPU. */
3112 val = REG_RD_IND(bp, cpu_reg->mode);
3113 val &= ~cpu_reg->mode_value_halt;
3114 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3115 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08003116
3117 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003118}
3119
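/* Bring up all on-chip processors.  The two RV2P engines are loaded
 * first, then the RXP, TXP, TPAT, COM and CP CPUs.  Each CPU is
 * described by a cpu_reg block (mode/state/PC/instruction registers and
 * scratchpad base) and gets a firmware image selected by chip family
 * (the *_09 images on the 5709, the *_06 images otherwise).  One
 * vmalloc'ed scratch buffer is reused to inflate every compressed
 * image.
 */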
Michael Chanfba9fe92006-06-12 22:21:25 -07003120static int
Michael Chanb6016b72005-05-26 13:03:09 -07003121bnx2_init_cpus(struct bnx2 *bp)
3122{
3123 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08003124 struct fw_info *fw;
Michael Chan110d0ef2007-12-12 11:18:34 -08003125 int rc, rv2p_len;
3126 void *text, *rv2p;
Michael Chanb6016b72005-05-26 13:03:09 -07003127
3128 /* Initialize the RV2P processor. */
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003129 text = vmalloc(FW_BUF_SIZE);
3130 if (!text)
3131 return -ENOMEM;
Michael Chan110d0ef2007-12-12 11:18:34 -08003132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3133 rv2p = bnx2_xi_rv2p_proc1;
3134 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3135 } else {
3136 rv2p = bnx2_rv2p_proc1;
3137 rv2p_len = sizeof(bnx2_rv2p_proc1);
3138 }
3139 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003140 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003141 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003142
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003143 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
Michael Chanfba9fe92006-06-12 22:21:25 -07003144
Michael Chan110d0ef2007-12-12 11:18:34 -08003145 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3146 rv2p = bnx2_xi_rv2p_proc2;
3147 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3148 } else {
3149 rv2p = bnx2_rv2p_proc2;
3150 rv2p_len = sizeof(bnx2_rv2p_proc2);
3151 }
3152 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
Michael Chanea1f8d52007-10-02 16:27:35 -07003153 if (rc < 0)
Michael Chanfba9fe92006-06-12 22:21:25 -07003154 goto init_cpu_err;
Michael Chanea1f8d52007-10-02 16:27:35 -07003155
Denys Vlasenkob3448b02007-09-30 17:55:51 -07003156 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07003157
3158 /* Initialize the RX Processor. */
3159 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3160 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3161 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3162 cpu_reg.state = BNX2_RXP_CPU_STATE;
3163 cpu_reg.state_value_clear = 0xffffff;
3164 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3165 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3166 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3167 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3168 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3169 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3170 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003171
Michael Chand43584c2006-11-19 14:14:35 -08003172 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3173 fw = &bnx2_rxp_fw_09;
3174 else
3175 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003176
Michael Chanea1f8d52007-10-02 16:27:35 -07003177 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003178 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003179 if (rc)
3180 goto init_cpu_err;
3181
Michael Chanb6016b72005-05-26 13:03:09 -07003182 /* Initialize the TX Processor. */
3183 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3184 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3185 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3186 cpu_reg.state = BNX2_TXP_CPU_STATE;
3187 cpu_reg.state_value_clear = 0xffffff;
3188 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3189 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3190 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3191 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3192 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3193 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3194 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003195
Michael Chand43584c2006-11-19 14:14:35 -08003196 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3197 fw = &bnx2_txp_fw_09;
3198 else
3199 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003200
Michael Chanea1f8d52007-10-02 16:27:35 -07003201 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003202 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003203 if (rc)
3204 goto init_cpu_err;
3205
Michael Chanb6016b72005-05-26 13:03:09 -07003206 /* Initialize the TX Patch-up Processor. */
3207 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3208 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3209 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3210 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3211 cpu_reg.state_value_clear = 0xffffff;
3212 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3213 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3214 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3215 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3216 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3217 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3218 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003219
Michael Chand43584c2006-11-19 14:14:35 -08003220 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3221 fw = &bnx2_tpat_fw_09;
3222 else
3223 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003224
Michael Chanea1f8d52007-10-02 16:27:35 -07003225 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003226 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003227 if (rc)
3228 goto init_cpu_err;
3229
Michael Chanb6016b72005-05-26 13:03:09 -07003230 /* Initialize the Completion Processor. */
3231 cpu_reg.mode = BNX2_COM_CPU_MODE;
3232 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3233 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3234 cpu_reg.state = BNX2_COM_CPU_STATE;
3235 cpu_reg.state_value_clear = 0xffffff;
3236 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3237 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3238 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3239 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3240 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3241 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3242 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003243
Michael Chand43584c2006-11-19 14:14:35 -08003244 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3245 fw = &bnx2_com_fw_09;
3246 else
3247 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003248
Michael Chanea1f8d52007-10-02 16:27:35 -07003249 fw->text = text;
Michael Chanaf3ee512006-11-19 14:09:25 -08003250 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003251 if (rc)
3252 goto init_cpu_err;
3253
Michael Chand43584c2006-11-19 14:14:35 -08003254 /* Initialize the Command Processor. */
3255 cpu_reg.mode = BNX2_CP_CPU_MODE;
3256 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3257 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3258 cpu_reg.state = BNX2_CP_CPU_STATE;
3259 cpu_reg.state_value_clear = 0xffffff;
3260 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3261 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3262 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3263 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3264 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3265 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3266 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003267
Michael Chan110d0ef2007-12-12 11:18:34 -08003268 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chand43584c2006-11-19 14:14:35 -08003269 fw = &bnx2_cp_fw_09;
Michael Chan110d0ef2007-12-12 11:18:34 -08003270 else
3271 fw = &bnx2_cp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003272
Michael Chan110d0ef2007-12-12 11:18:34 -08003273 fw->text = text;
3274 rc = load_cpu_fw(bp, &cpu_reg, fw);
3275
Michael Chanfba9fe92006-06-12 22:21:25 -07003276init_cpu_err:
Michael Chanea1f8d52007-10-02 16:27:35 -07003277 vfree(text);
Michael Chanfba9fe92006-06-12 22:21:25 -07003278 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003279}
3280
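/* Move the device between D0 and D3hot.  When entering D3hot with WOL
 * enabled, the copper PHY is re-programmed for 10/100 autoneg, magic
 * and ACPI packet reception is turned on, and the EMAC/RPM blocks are
 * left enabled so the chip can wake the system; without WOL the
 * firmware is simply told that we are suspending.
 */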
3281static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003282bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003283{
3284 u16 pmcsr;
3285
3286 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3287
3288 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003289 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003290 u32 val;
3291
3292 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3293 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3294 PCI_PM_CTRL_PME_STATUS);
3295
3296 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3297 /* delay required during transition out of D3hot */
3298 msleep(20);
3299
3300 val = REG_RD(bp, BNX2_EMAC_MODE);
3301 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3302 val &= ~BNX2_EMAC_MODE_MPKT;
3303 REG_WR(bp, BNX2_EMAC_MODE, val);
3304
3305 val = REG_RD(bp, BNX2_RPM_CONFIG);
3306 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3307 REG_WR(bp, BNX2_RPM_CONFIG, val);
3308 break;
3309 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003310 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003311 int i;
3312 u32 val, wol_msg;
3313
3314 if (bp->wol) {
3315 u32 advertising;
3316 u8 autoneg;
3317
3318 autoneg = bp->autoneg;
3319 advertising = bp->advertising;
3320
Michael Chan239cd342007-10-17 19:26:15 -07003321 if (bp->phy_port == PORT_TP) {
3322 bp->autoneg = AUTONEG_SPEED;
3323 bp->advertising = ADVERTISED_10baseT_Half |
3324 ADVERTISED_10baseT_Full |
3325 ADVERTISED_100baseT_Half |
3326 ADVERTISED_100baseT_Full |
3327 ADVERTISED_Autoneg;
3328 }
Michael Chanb6016b72005-05-26 13:03:09 -07003329
Michael Chan239cd342007-10-17 19:26:15 -07003330 spin_lock_bh(&bp->phy_lock);
3331 bnx2_setup_phy(bp, bp->phy_port);
3332 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003333
3334 bp->autoneg = autoneg;
3335 bp->advertising = advertising;
3336
3337 bnx2_set_mac_addr(bp);
3338
3339 val = REG_RD(bp, BNX2_EMAC_MODE);
3340
3341 /* Enable port mode. */
3342 val &= ~BNX2_EMAC_MODE_PORT;
Michael Chan239cd342007-10-17 19:26:15 -07003343 val |= BNX2_EMAC_MODE_MPKT_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003344 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003345 BNX2_EMAC_MODE_MPKT;
Michael Chan239cd342007-10-17 19:26:15 -07003346 if (bp->phy_port == PORT_TP)
3347 val |= BNX2_EMAC_MODE_PORT_MII;
3348 else {
3349 val |= BNX2_EMAC_MODE_PORT_GMII;
3350 if (bp->line_speed == SPEED_2500)
3351 val |= BNX2_EMAC_MODE_25G_MODE;
3352 }
Michael Chanb6016b72005-05-26 13:03:09 -07003353
3354 REG_WR(bp, BNX2_EMAC_MODE, val);
3355
3356 /* receive all multicast */
3357 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3358 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3359 0xffffffff);
3360 }
3361 REG_WR(bp, BNX2_EMAC_RX_MODE,
3362 BNX2_EMAC_RX_MODE_SORT_MODE);
3363
3364 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3365 BNX2_RPM_SORT_USER0_MC_EN;
3366 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3367 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3368 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3369 BNX2_RPM_SORT_USER0_ENA);
3370
3371 /* Need to enable EMAC and RPM for WOL. */
3372 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3373 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3374 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3375 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3376
3377 val = REG_RD(bp, BNX2_RPM_CONFIG);
3378 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3379 REG_WR(bp, BNX2_RPM_CONFIG, val);
3380
3381 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3382 }
3383 else {
3384 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3385 }
3386
Michael Chandda1e392006-01-23 16:08:14 -08003387 if (!(bp->flags & NO_WOL_FLAG))
3388 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003389
3390 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3391 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3392 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3393
3394 if (bp->wol)
3395 pmcsr |= 3;
3396 }
3397 else {
3398 pmcsr |= 3;
3399 }
3400 if (bp->wol) {
3401 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3402 }
3403 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3404 pmcsr);
3405
3406 /* No more memory access after this point until
3407 * device is brought back to D0.
3408 */
3409 udelay(50);
3410 break;
3411 }
3412 default:
3413 return -EINVAL;
3414 }
3415 return 0;
3416}
3417
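/* NVRAM arbitration: request ownership of the flash interface through
 * the SW_ARB register and poll until the arbiter grants it (or give up
 * after NVRAM_TIMEOUT_COUNT polls).  bnx2_release_nvram_lock() below is
 * the matching release.
 */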
3418static int
3419bnx2_acquire_nvram_lock(struct bnx2 *bp)
3420{
3421 u32 val;
3422 int j;
3423
3424 /* Request access to the flash interface. */
3425 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3426 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3428 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3429 break;
3430
3431 udelay(5);
3432 }
3433
3434 if (j >= NVRAM_TIMEOUT_COUNT)
3435 return -EBUSY;
3436
3437 return 0;
3438}
3439
3440static int
3441bnx2_release_nvram_lock(struct bnx2 *bp)
3442{
3443 int j;
3444 u32 val;
3445
3446 /* Relinquish nvram interface. */
3447 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3448
3449 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3450 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3451 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3452 break;
3453
3454 udelay(5);
3455 }
3456
3457 if (j >= NVRAM_TIMEOUT_COUNT)
3458 return -EBUSY;
3459
3460 return 0;
3461}
3462
3463
3464static int
3465bnx2_enable_nvram_write(struct bnx2 *bp)
3466{
3467 u32 val;
3468
3469 val = REG_RD(bp, BNX2_MISC_CFG);
3470 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3471
Michael Chane30372c2007-07-16 18:26:23 -07003472 if (bp->flash_info->flags & BNX2_NV_WREN) {
Michael Chanb6016b72005-05-26 13:03:09 -07003473 int j;
3474
3475 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3476 REG_WR(bp, BNX2_NVM_COMMAND,
3477 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3478
3479 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3480 udelay(5);
3481
3482 val = REG_RD(bp, BNX2_NVM_COMMAND);
3483 if (val & BNX2_NVM_COMMAND_DONE)
3484 break;
3485 }
3486
3487 if (j >= NVRAM_TIMEOUT_COUNT)
3488 return -EBUSY;
3489 }
3490 return 0;
3491}
3492
3493static void
3494bnx2_disable_nvram_write(struct bnx2 *bp)
3495{
3496 u32 val;
3497
3498 val = REG_RD(bp, BNX2_MISC_CFG);
3499 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3500}
3501
3502
3503static void
3504bnx2_enable_nvram_access(struct bnx2 *bp)
3505{
3506 u32 val;
3507
3508 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3509 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003510 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003511 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3512}
3513
3514static void
3515bnx2_disable_nvram_access(struct bnx2 *bp)
3516{
3517 u32 val;
3518
3519 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3520 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003521 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003522 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3523 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3524}
3525
3526static int
3527bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3528{
3529 u32 cmd;
3530 int j;
3531
Michael Chane30372c2007-07-16 18:26:23 -07003532 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
Michael Chanb6016b72005-05-26 13:03:09 -07003533 /* Buffered flash, no erase needed */
3534 return 0;
3535
3536 /* Build an erase command */
3537 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3538 BNX2_NVM_COMMAND_DOIT;
3539
3540 /* Need to clear DONE bit separately. */
3541 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3542
3543 	/* Address of the NVRAM page to erase. */
3544 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3545
3546 /* Issue an erase command. */
3547 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3548
3549 /* Wait for completion. */
3550 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3551 u32 val;
3552
3553 udelay(5);
3554
3555 val = REG_RD(bp, BNX2_NVM_COMMAND);
3556 if (val & BNX2_NVM_COMMAND_DONE)
3557 break;
3558 }
3559
3560 if (j >= NVRAM_TIMEOUT_COUNT)
3561 return -EBUSY;
3562
3563 return 0;
3564}
3565
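/* Read one 32-bit word from NVRAM.  For flash parts that need address
 * translation, the linear byte offset is first converted to a
 * page-based device address:
 *
 *	addr = ((offset / page_size) << page_bits) + (offset % page_size)
 *
 * e.g. assuming a buffered part with 264-byte pages and page_bits = 9,
 * byte offset 266 becomes (1 << 9) + 2 = 0x202.
 */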
3566static int
3567bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3568{
3569 u32 cmd;
3570 int j;
3571
3572 /* Build the command word. */
3573 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3574
Michael Chane30372c2007-07-16 18:26:23 -07003575 	/* Translate the offset for buffered flash; not needed for the 5709. */
3576 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003577 offset = ((offset / bp->flash_info->page_size) <<
3578 bp->flash_info->page_bits) +
3579 (offset % bp->flash_info->page_size);
3580 }
3581
3582 /* Need to clear DONE bit separately. */
3583 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584
3585 /* Address of the NVRAM to read from. */
3586 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3587
3588 /* Issue a read command. */
3589 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3590
3591 /* Wait for completion. */
3592 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3593 u32 val;
3594
3595 udelay(5);
3596
3597 val = REG_RD(bp, BNX2_NVM_COMMAND);
3598 if (val & BNX2_NVM_COMMAND_DONE) {
3599 val = REG_RD(bp, BNX2_NVM_READ);
3600
3601 val = be32_to_cpu(val);
3602 memcpy(ret_val, &val, 4);
3603 break;
3604 }
3605 }
3606 if (j >= NVRAM_TIMEOUT_COUNT)
3607 return -EBUSY;
3608
3609 return 0;
3610}
3611
3612
3613static int
3614bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3615{
3616 u32 cmd, val32;
3617 int j;
3618
3619 /* Build the command word. */
3620 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3621
Michael Chane30372c2007-07-16 18:26:23 -07003622 	/* Translate the offset for buffered flash; not needed for the 5709. */
3623 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
Michael Chanb6016b72005-05-26 13:03:09 -07003624 offset = ((offset / bp->flash_info->page_size) <<
3625 bp->flash_info->page_bits) +
3626 (offset % bp->flash_info->page_size);
3627 }
3628
3629 /* Need to clear DONE bit separately. */
3630 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3631
3632 memcpy(&val32, val, 4);
3633 val32 = cpu_to_be32(val32);
3634
3635 /* Write the data. */
3636 REG_WR(bp, BNX2_NVM_WRITE, val32);
3637
3638 /* Address of the NVRAM to write to. */
3639 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3640
3641 /* Issue the write command. */
3642 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3643
3644 /* Wait for completion. */
3645 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3646 udelay(5);
3647
3648 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3649 break;
3650 }
3651 if (j >= NVRAM_TIMEOUT_COUNT)
3652 return -EBUSY;
3653
3654 return 0;
3655}
3656
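/* Identify the attached flash/EEPROM.  The 5709 uses a fixed
 * descriptor; on older chips the strapping bits in NVM_CFG1 (or the
 * backup straps when bit 23 is set) are matched against flash_table[],
 * and the interface is reconfigured with the matching config values if
 * it has not been already.  The usable size comes from the shared
 * memory configuration when the firmware provides one, otherwise from
 * the flash descriptor.
 */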
3657static int
3658bnx2_init_nvram(struct bnx2 *bp)
3659{
3660 u32 val;
Michael Chane30372c2007-07-16 18:26:23 -07003661 int j, entry_count, rc = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003662 struct flash_spec *flash;
3663
Michael Chane30372c2007-07-16 18:26:23 -07003664 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3665 bp->flash_info = &flash_5709;
3666 goto get_flash_size;
3667 }
3668
Michael Chanb6016b72005-05-26 13:03:09 -07003669 /* Determine the selected interface. */
3670 val = REG_RD(bp, BNX2_NVM_CFG1);
3671
Denis Chengff8ac602007-09-02 18:30:18 +08003672 entry_count = ARRAY_SIZE(flash_table);
Michael Chanb6016b72005-05-26 13:03:09 -07003673
Michael Chanb6016b72005-05-26 13:03:09 -07003674 if (val & 0x40000000) {
3675
3676 /* Flash interface has been reconfigured */
3677 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003678 j++, flash++) {
3679 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3680 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003681 bp->flash_info = flash;
3682 break;
3683 }
3684 }
3685 }
3686 else {
Michael Chan37137702005-11-04 08:49:17 -08003687 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003688 		/* Not yet reconfigured */
3689
Michael Chan37137702005-11-04 08:49:17 -08003690 if (val & (1 << 23))
3691 mask = FLASH_BACKUP_STRAP_MASK;
3692 else
3693 mask = FLASH_STRAP_MASK;
3694
Michael Chanb6016b72005-05-26 13:03:09 -07003695 for (j = 0, flash = &flash_table[0]; j < entry_count;
3696 j++, flash++) {
3697
Michael Chan37137702005-11-04 08:49:17 -08003698 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003699 bp->flash_info = flash;
3700
3701 /* Request access to the flash interface. */
3702 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3703 return rc;
3704
3705 /* Enable access to flash interface */
3706 bnx2_enable_nvram_access(bp);
3707
3708 /* Reconfigure the flash interface */
3709 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3710 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3711 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3712 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3713
3714 /* Disable access to flash interface */
3715 bnx2_disable_nvram_access(bp);
3716 bnx2_release_nvram_lock(bp);
3717
3718 break;
3719 }
3720 }
3721 } /* if (val & 0x40000000) */
3722
3723 if (j == entry_count) {
3724 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003725 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003726 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003727 }
3728
Michael Chane30372c2007-07-16 18:26:23 -07003729get_flash_size:
Michael Chan1122db72006-01-23 16:11:42 -08003730 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3731 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3732 if (val)
3733 bp->flash_size = val;
3734 else
3735 bp->flash_size = bp->flash_info->total_size;
3736
Michael Chanb6016b72005-05-26 13:03:09 -07003737 return rc;
3738}
3739
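/* Arbitrary-offset, arbitrary-length NVRAM read built on dword reads.
 * Unaligned head and tail bytes are handled by reading the enclosing
 * dword into a small bounce buffer, and the FIRST/LAST command flags
 * bracket the whole access for the flash controller.
 */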
3740static int
3741bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3742 int buf_size)
3743{
3744 int rc = 0;
3745 u32 cmd_flags, offset32, len32, extra;
3746
3747 if (buf_size == 0)
3748 return 0;
3749
3750 /* Request access to the flash interface. */
3751 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752 return rc;
3753
3754 /* Enable access to flash interface */
3755 bnx2_enable_nvram_access(bp);
3756
3757 len32 = buf_size;
3758 offset32 = offset;
3759 extra = 0;
3760
3761 cmd_flags = 0;
3762
3763 if (offset32 & 3) {
3764 u8 buf[4];
3765 u32 pre_len;
3766
3767 offset32 &= ~3;
3768 pre_len = 4 - (offset & 3);
3769
3770 if (pre_len >= len32) {
3771 pre_len = len32;
3772 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3773 BNX2_NVM_COMMAND_LAST;
3774 }
3775 else {
3776 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3777 }
3778
3779 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3780
3781 if (rc)
3782 return rc;
3783
3784 memcpy(ret_buf, buf + (offset & 3), pre_len);
3785
3786 offset32 += 4;
3787 ret_buf += pre_len;
3788 len32 -= pre_len;
3789 }
3790 if (len32 & 3) {
3791 extra = 4 - (len32 & 3);
3792 len32 = (len32 + 4) & ~3;
3793 }
3794
3795 if (len32 == 4) {
3796 u8 buf[4];
3797
3798 if (cmd_flags)
3799 cmd_flags = BNX2_NVM_COMMAND_LAST;
3800 else
3801 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3802 BNX2_NVM_COMMAND_LAST;
3803
3804 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3805
3806 memcpy(ret_buf, buf, 4 - extra);
3807 }
3808 else if (len32 > 0) {
3809 u8 buf[4];
3810
3811 /* Read the first word. */
3812 if (cmd_flags)
3813 cmd_flags = 0;
3814 else
3815 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816
3817 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3818
3819 /* Advance to the next dword. */
3820 offset32 += 4;
3821 ret_buf += 4;
3822 len32 -= 4;
3823
3824 while (len32 > 4 && rc == 0) {
3825 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3826
3827 /* Advance to the next dword. */
3828 offset32 += 4;
3829 ret_buf += 4;
3830 len32 -= 4;
3831 }
3832
3833 if (rc)
3834 return rc;
3835
3836 cmd_flags = BNX2_NVM_COMMAND_LAST;
3837 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3838
3839 memcpy(ret_buf, buf, 4 - extra);
3840 }
3841
3842 /* Disable access to flash interface */
3843 bnx2_disable_nvram_access(bp);
3844
3845 bnx2_release_nvram_lock(bp);
3846
3847 return rc;
3848}
3849
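/* Arbitrary-offset, arbitrary-length NVRAM write.  Unaligned head and
 * tail bytes are first merged with the existing flash contents into an
 * aligned bounce buffer.  The write then proceeds page by page: on
 * non-buffered parts each page is read out, erased and rewritten
 * (read-modify-write), while buffered parts are written directly.
 */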
3850static int
3851bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3852 int buf_size)
3853{
3854 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003855 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003856 int rc = 0;
3857 int align_start, align_end;
3858
3859 buf = data_buf;
3860 offset32 = offset;
3861 len32 = buf_size;
3862 align_start = align_end = 0;
3863
3864 if ((align_start = (offset32 & 3))) {
3865 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003866 len32 += align_start;
3867 if (len32 < 4)
3868 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003869 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3870 return rc;
3871 }
3872
3873 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003874 align_end = 4 - (len32 & 3);
3875 len32 += align_end;
3876 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3877 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003878 }
3879
3880 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003881 align_buf = kmalloc(len32, GFP_KERNEL);
3882 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003883 return -ENOMEM;
3884 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003885 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003886 }
3887 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003888 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003889 }
Michael Chane6be7632007-01-08 19:56:13 -08003890 memcpy(align_buf + align_start, data_buf, buf_size);
3891 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003892 }
3893
Michael Chane30372c2007-07-16 18:26:23 -07003894 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanae181bc2006-05-22 16:39:20 -07003895 flash_buffer = kmalloc(264, GFP_KERNEL);
3896 if (flash_buffer == NULL) {
3897 rc = -ENOMEM;
3898 goto nvram_write_end;
3899 }
3900 }
3901
Michael Chanb6016b72005-05-26 13:03:09 -07003902 written = 0;
3903 while ((written < len32) && (rc == 0)) {
3904 u32 page_start, page_end, data_start, data_end;
3905 u32 addr, cmd_flags;
3906 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003907
3908 /* Find the page_start addr */
3909 page_start = offset32 + written;
3910 page_start -= (page_start % bp->flash_info->page_size);
3911 /* Find the page_end addr */
3912 page_end = page_start + bp->flash_info->page_size;
3913 /* Find the data_start addr */
3914 data_start = (written == 0) ? offset32 : page_start;
3915 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003916 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003917 (offset32 + len32) : page_end;
3918
3919 /* Request access to the flash interface. */
3920 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3921 goto nvram_write_end;
3922
3923 /* Enable access to flash interface */
3924 bnx2_enable_nvram_access(bp);
3925
3926 cmd_flags = BNX2_NVM_COMMAND_FIRST;
Michael Chane30372c2007-07-16 18:26:23 -07003927 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003928 int j;
3929
3930 /* Read the whole page into the buffer
3931 			 * (non-buffered flash only) */
3932 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3933 if (j == (bp->flash_info->page_size - 4)) {
3934 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3935 }
3936 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003937 page_start + j,
3938 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003939 cmd_flags);
3940
3941 if (rc)
3942 goto nvram_write_end;
3943
3944 cmd_flags = 0;
3945 }
3946 }
3947
3948 /* Enable writes to flash interface (unlock write-protect) */
3949 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3950 goto nvram_write_end;
3951
Michael Chanb6016b72005-05-26 13:03:09 -07003952 /* Loop to write back the buffer data from page_start to
3953 * data_start */
3954 i = 0;
Michael Chane30372c2007-07-16 18:26:23 -07003955 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanc8738792007-03-30 14:53:06 -07003956 /* Erase the page */
3957 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3958 goto nvram_write_end;
3959
3960 /* Re-enable the write again for the actual write */
3961 bnx2_enable_nvram_write(bp);
3962
Michael Chanb6016b72005-05-26 13:03:09 -07003963 for (addr = page_start; addr < data_start;
3964 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003965
Michael Chanb6016b72005-05-26 13:03:09 -07003966 rc = bnx2_nvram_write_dword(bp, addr,
3967 &flash_buffer[i], cmd_flags);
3968
3969 if (rc != 0)
3970 goto nvram_write_end;
3971
3972 cmd_flags = 0;
3973 }
3974 }
3975
3976 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003977 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003978 if ((addr == page_end - 4) ||
Michael Chane30372c2007-07-16 18:26:23 -07003979 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
Michael Chanb6016b72005-05-26 13:03:09 -07003980 (addr == data_end - 4))) {
3981
3982 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3983 }
3984 rc = bnx2_nvram_write_dword(bp, addr, buf,
3985 cmd_flags);
3986
3987 if (rc != 0)
3988 goto nvram_write_end;
3989
3990 cmd_flags = 0;
3991 buf += 4;
3992 }
3993
3994 /* Loop to write back the buffer data from data_end
3995 * to page_end */
Michael Chane30372c2007-07-16 18:26:23 -07003996 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003997 for (addr = data_end; addr < page_end;
3998 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003999
Michael Chanb6016b72005-05-26 13:03:09 -07004000 if (addr == page_end-4) {
4001 cmd_flags = BNX2_NVM_COMMAND_LAST;
4002 }
4003 rc = bnx2_nvram_write_dword(bp, addr,
4004 &flash_buffer[i], cmd_flags);
4005
4006 if (rc != 0)
4007 goto nvram_write_end;
4008
4009 cmd_flags = 0;
4010 }
4011 }
4012
4013 /* Disable writes to flash interface (lock write-protect) */
4014 bnx2_disable_nvram_write(bp);
4015
4016 /* Disable access to flash interface */
4017 bnx2_disable_nvram_access(bp);
4018 bnx2_release_nvram_lock(bp);
4019
4020 /* Increment written */
4021 written += data_end - data_start;
4022 }
4023
4024nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08004025 kfree(flash_buffer);
4026 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07004027 return rc;
4028}
4029
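/* Detect a firmware-managed ("remote") PHY on SerDes devices.  If the
 * bootcode advertises the capability, record the current port type and
 * link state from shared memory and, if the interface is up,
 * acknowledge the capability back to the bootcode.
 */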
Michael Chan0d8a6572007-07-07 22:49:43 -07004030static void
4031bnx2_init_remote_phy(struct bnx2 *bp)
4032{
4033 u32 val;
4034
4035 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4036 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4037 return;
4038
4039 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4040 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4041 return;
4042
4043 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
Michael Chan0d8a6572007-07-07 22:49:43 -07004044 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4045
4046 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4047 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4048 bp->phy_port = PORT_FIBRE;
4049 else
4050 bp->phy_port = PORT_TP;
Michael Chan489310a2007-10-10 16:16:31 -07004051
4052 if (netif_running(bp->dev)) {
4053 u32 sig;
4054
4055 if (val & BNX2_LINK_STATUS_LINK_UP) {
4056 bp->link_up = 1;
4057 netif_carrier_on(bp->dev);
4058 } else {
4059 bp->link_up = 0;
4060 netif_carrier_off(bp->dev);
4061 }
4062 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4063 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4064 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4065 sig);
4066 }
Michael Chan0d8a6572007-07-07 22:49:43 -07004067 }
4068}
4069
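/* Soft-reset the chip: quiesce DMA and host coalescing, handshake with
 * the bootcode (WAIT0), deposit a driver reset signature, issue the
 * core reset (via MISC_COMMAND on the 5709, a PCICFG_MISC_CONFIG write
 * otherwise), verify byte swapping, then wait for the bootcode to
 * finish its own initialization (WAIT1) and re-probe the remote PHY
 * state.
 */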
Michael Chanb6016b72005-05-26 13:03:09 -07004070static int
4071bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4072{
4073 u32 val;
4074 int i, rc = 0;
Michael Chan489310a2007-10-10 16:16:31 -07004075 u8 old_port;
Michael Chanb6016b72005-05-26 13:03:09 -07004076
4077 /* Wait for the current PCI transaction to complete before
4078 * issuing a reset. */
4079 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4080 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4081 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4082 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4083 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4084 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4085 udelay(5);
4086
Michael Chanb090ae22006-01-23 16:07:10 -08004087 /* Wait for the firmware to tell us it is ok to issue a reset. */
4088 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4089
Michael Chanb6016b72005-05-26 13:03:09 -07004090 /* Deposit a driver reset signature so the firmware knows that
4091 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08004092 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07004093 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4094
Michael Chanb6016b72005-05-26 13:03:09 -07004095 	/* Do a dummy read to force the chip to complete all current transactions
4096 * before we issue a reset. */
4097 val = REG_RD(bp, BNX2_MISC_ID);
4098
Michael Chan234754d2006-11-19 14:11:41 -08004099 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4100 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4101 REG_RD(bp, BNX2_MISC_COMMAND);
4102 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07004103
Michael Chan234754d2006-11-19 14:11:41 -08004104 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4105 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07004106
Michael Chan234754d2006-11-19 14:11:41 -08004107 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004108
Michael Chan234754d2006-11-19 14:11:41 -08004109 } else {
4110 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4111 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4112 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4113
4114 /* Chip reset. */
4115 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4116
Michael Chan594a9df2007-08-28 15:39:42 -07004117 /* Reading back any register after chip reset will hang the
4118 * bus on 5706 A0 and A1. The msleep below provides plenty
4119 * of margin for write posting.
4120 */
Michael Chan234754d2006-11-19 14:11:41 -08004121 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
Arjan van de Ven8e545882007-08-28 14:34:43 -07004122 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4123 msleep(20);
Michael Chanb6016b72005-05-26 13:03:09 -07004124
Michael Chan234754d2006-11-19 14:11:41 -08004125 		/* Reset takes approximately 30 usec */
4126 for (i = 0; i < 10; i++) {
4127 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4128 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4129 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4130 break;
4131 udelay(10);
4132 }
4133
4134 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4135 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4136 printk(KERN_ERR PFX "Chip reset did not complete\n");
4137 return -EBUSY;
4138 }
Michael Chanb6016b72005-05-26 13:03:09 -07004139 }
4140
4141 /* Make sure byte swapping is properly configured. */
4142 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4143 if (val != 0x01020304) {
4144 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4145 return -ENODEV;
4146 }
4147
Michael Chanb6016b72005-05-26 13:03:09 -07004148 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08004149 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4150 if (rc)
4151 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004152
Michael Chan0d8a6572007-07-07 22:49:43 -07004153 spin_lock_bh(&bp->phy_lock);
Michael Chan489310a2007-10-10 16:16:31 -07004154 old_port = bp->phy_port;
Michael Chan0d8a6572007-07-07 22:49:43 -07004155 bnx2_init_remote_phy(bp);
Michael Chan489310a2007-10-10 16:16:31 -07004156 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
Michael Chan0d8a6572007-07-07 22:49:43 -07004157 bnx2_set_default_remote_link(bp);
4158 spin_unlock_bh(&bp->phy_lock);
4159
Michael Chanb6016b72005-05-26 13:03:09 -07004160 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4161 /* Adjust the voltage regular to two steps lower. The default
4162 * of this register is 0x0000000e. */
4163 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4164
4165 /* Remove bad rbuf memory from the free pool. */
4166 rc = bnx2_alloc_bad_rbuf(bp);
4167 }
4168
4169 return rc;
4170}
4171
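/* Post-reset chip bring-up: program DMA byte/word swapping, enable the
 * context and host coalescing blocks, load the processor firmware,
 * probe NVRAM, program the MAC address, MTU, status/statistics block
 * addresses and interrupt coalescing parameters, then complete the
 * firmware handshake (WAIT2) and enable the remaining chip blocks.
 */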
4172static int
4173bnx2_init_chip(struct bnx2 *bp)
4174{
4175 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08004176 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004177
4178 /* Make sure the interrupt is not active. */
4179 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4180
4181 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4182 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4183#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004184 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004185#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004186 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07004187 DMA_READ_CHANS << 12 |
4188 DMA_WRITE_CHANS << 16;
4189
4190 val |= (0x2 << 20) | (1 << 11);
4191
Michael Chandda1e392006-01-23 16:08:14 -08004192 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07004193 val |= (1 << 23);
4194
4195 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4196 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4197 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4198
4199 REG_WR(bp, BNX2_DMA_CONFIG, val);
4200
4201 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4202 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4203 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4204 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4205 }
4206
4207 if (bp->flags & PCIX_FLAG) {
4208 u16 val16;
4209
4210 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4211 &val16);
4212 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4213 val16 & ~PCI_X_CMD_ERO);
4214 }
4215
4216 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4217 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4218 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4219 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4220
4221 /* Initialize context mapping and zero out the quick contexts. The
4222 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07004223 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4224 rc = bnx2_init_5709_context(bp);
4225 if (rc)
4226 return rc;
4227 } else
Michael Chan59b47d82006-11-19 14:10:45 -08004228 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004229
Michael Chanfba9fe92006-06-12 22:21:25 -07004230 if ((rc = bnx2_init_cpus(bp)) != 0)
4231 return rc;
4232
Michael Chanb6016b72005-05-26 13:03:09 -07004233 bnx2_init_nvram(bp);
4234
4235 bnx2_set_mac_addr(bp);
4236
4237 val = REG_RD(bp, BNX2_MQ_CONFIG);
4238 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4239 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004240 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4241 val |= BNX2_MQ_CONFIG_HALT_DIS;
4242
Michael Chanb6016b72005-05-26 13:03:09 -07004243 REG_WR(bp, BNX2_MQ_CONFIG, val);
4244
4245 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4246 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4247 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4248
4249 val = (BCM_PAGE_BITS - 8) << 24;
4250 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4251
4252 /* Configure page size. */
4253 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4254 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4255 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4256 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4257
4258 val = bp->mac_addr[0] +
4259 (bp->mac_addr[1] << 8) +
4260 (bp->mac_addr[2] << 16) +
4261 bp->mac_addr[3] +
4262 (bp->mac_addr[4] << 8) +
4263 (bp->mac_addr[5] << 16);
4264 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4265
4266 /* Program the MTU. Also include 4 bytes for CRC32. */
4267 val = bp->dev->mtu + ETH_HLEN + 4;
4268 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4269 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4270 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4271
Michael Chan35efa7c2007-12-20 19:56:37 -08004272 bp->bnx2_napi.last_status_idx = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004273 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4274
4275 /* Set up how to generate a link change interrupt. */
4276 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4277
4278 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4279 (u64) bp->status_blk_mapping & 0xffffffff);
4280 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4281
4282 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4283 (u64) bp->stats_blk_mapping & 0xffffffff);
4284 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4285 (u64) bp->stats_blk_mapping >> 32);
4286
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004287 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004288 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4289
4290 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4291 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4292
4293 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4294 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4295
4296 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4297
4298 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4299
4300 REG_WR(bp, BNX2_HC_COM_TICKS,
4301 (bp->com_ticks_int << 16) | bp->com_ticks);
4302
4303 REG_WR(bp, BNX2_HC_CMD_TICKS,
4304 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4305
Michael Chan02537b062007-06-04 21:24:07 -07004306 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4307 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4308 else
Michael Chan7ea69202007-07-16 18:27:10 -07004309 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
Michael Chanb6016b72005-05-26 13:03:09 -07004310 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4311
4312 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004313 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004314 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004315 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4316 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004317 }
4318
Michael Chan8e6a72c2007-05-03 13:24:48 -07004319 if (bp->flags & ONE_SHOT_MSI_FLAG)
4320 val |= BNX2_HC_CONFIG_ONE_SHOT;
4321
4322 REG_WR(bp, BNX2_HC_CONFIG, val);
4323
Michael Chanb6016b72005-05-26 13:03:09 -07004324 /* Clear internal stats counters. */
4325 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4326
Michael Chanda3e4fb2007-05-03 13:24:23 -07004327 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004328
4329 /* Initialize the receive filter. */
4330 bnx2_set_rx_mode(bp->dev);
4331
Michael Chan0aa38df2007-06-04 21:23:06 -07004332 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4333 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4334 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4335 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4336 }
Michael Chanb090ae22006-01-23 16:07:10 -08004337 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4338 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004339
Michael Chandf149d72007-07-07 22:51:36 -07004340 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
Michael Chanb6016b72005-05-26 13:03:09 -07004341 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4342
4343 udelay(20);
4344
Michael Chanbf5295b2006-03-23 01:11:56 -08004345 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4346
Michael Chanb090ae22006-01-23 16:07:10 -08004347 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004348}
4349
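/* Program the TX ring's context: connection type, command type and the
 * 64-bit base address of the BD chain.  The 5709 uses the XI context
 * offsets, older chips the original ones.
 */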
Michael Chan59b47d82006-11-19 14:10:45 -08004350static void
4351bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4352{
4353 u32 val, offset0, offset1, offset2, offset3;
4354
4355 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4356 offset0 = BNX2_L2CTX_TYPE_XI;
4357 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4358 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4359 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4360 } else {
4361 offset0 = BNX2_L2CTX_TYPE;
4362 offset1 = BNX2_L2CTX_CMD_TYPE;
4363 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4364 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4365 }
4366 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4367 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4368
4369 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4370 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4371
4372 val = (u64) bp->tx_desc_mapping >> 32;
4373 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4374
4375 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4376 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4377}
Michael Chanb6016b72005-05-26 13:03:09 -07004378
4379static void
4380bnx2_init_tx_ring(struct bnx2 *bp)
4381{
4382 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004383 u32 cid;
Michael Chana550c992007-12-20 19:56:59 -08004384 struct bnx2_napi *bnapi = &bp->bnx2_napi;
Michael Chanb6016b72005-05-26 13:03:09 -07004385
Michael Chan2f8af122006-08-15 01:39:10 -07004386 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4387
Michael Chanb6016b72005-05-26 13:03:09 -07004388 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004389
Michael Chanb6016b72005-05-26 13:03:09 -07004390 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4391 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4392
4393 bp->tx_prod = 0;
Michael Chana550c992007-12-20 19:56:59 -08004394 bnapi->tx_cons = 0;
4395 bnapi->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004396 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004397
Michael Chan59b47d82006-11-19 14:10:45 -08004398 cid = TX_CID;
4399 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4400 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004401
Michael Chan59b47d82006-11-19 14:10:45 -08004402 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004403}
4404
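/* Initialize a set of chained RX buffer-descriptor pages: every BD in a
 * page gets the buffer size and START/END flags, and the last BD of
 * each page holds the DMA address of the next page (the final page
 * wraps back to the first), forming one logical ring.
 */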
4405static void
Michael Chan5d5d0012007-12-12 11:17:43 -08004406bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4407 int num_rings)
Michael Chanb6016b72005-05-26 13:03:09 -07004408{
Michael Chanb6016b72005-05-26 13:03:09 -07004409 int i;
Michael Chan5d5d0012007-12-12 11:17:43 -08004410 struct rx_bd *rxbd;
Michael Chanb6016b72005-05-26 13:03:09 -07004411
Michael Chan5d5d0012007-12-12 11:17:43 -08004412 for (i = 0; i < num_rings; i++) {
Michael Chan13daffa2006-03-20 17:49:20 -08004413 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004414
Michael Chan5d5d0012007-12-12 11:17:43 -08004415 rxbd = &rx_ring[i][0];
Michael Chan13daffa2006-03-20 17:49:20 -08004416 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
Michael Chan5d5d0012007-12-12 11:17:43 -08004417 rxbd->rx_bd_len = buf_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004418 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4419 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004420 if (i == (num_rings - 1))
Michael Chan13daffa2006-03-20 17:49:20 -08004421 j = 0;
4422 else
4423 j = i + 1;
Michael Chan5d5d0012007-12-12 11:17:43 -08004424 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4425 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
Michael Chan13daffa2006-03-20 17:49:20 -08004426 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004427}
4428
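/* Set up the RX ring and, for jumbo MTUs, the RX page ring: chain the
 * BD pages, program the ring context (buffer sizes and base addresses),
 * pre-fill the rings with pages and skbs, then publish the producer
 * indices and byte sequence to the chip.
 */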
4429static void
4430bnx2_init_rx_ring(struct bnx2 *bp)
4431{
4432 int i;
4433 u16 prod, ring_prod;
4434 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4435
4436 bp->rx_prod = 0;
4437 bp->rx_cons = 0;
4438 bp->rx_prod_bseq = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08004439 bp->rx_pg_prod = 0;
4440 bp->rx_pg_cons = 0;
Michael Chan5d5d0012007-12-12 11:17:43 -08004441
4442 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4443 bp->rx_buf_use_size, bp->rx_max_ring);
4444
4445 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
Michael Chan47bf4242007-12-12 11:19:12 -08004446 if (bp->rx_pg_ring_size) {
4447 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4448 bp->rx_pg_desc_mapping,
4449 PAGE_SIZE, bp->rx_max_pg_ring);
4450 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4452 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4453 BNX2_L2CTX_RBDC_JUMBO_KEY);
4454
4455 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4456 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4457
4458 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4459 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4460
4461 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4462 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4463 }
Michael Chanb6016b72005-05-26 13:03:09 -07004464
4465 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4466 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4467 val |= 0x02 << 8;
Michael Chan5d5d0012007-12-12 11:17:43 -08004468 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004469
Michael Chan13daffa2006-03-20 17:49:20 -08004470 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chan5d5d0012007-12-12 11:17:43 -08004471 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004472
Michael Chan13daffa2006-03-20 17:49:20 -08004473 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chan5d5d0012007-12-12 11:17:43 -08004474 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
Michael Chanb6016b72005-05-26 13:03:09 -07004475
Michael Chan47bf4242007-12-12 11:19:12 -08004476 ring_prod = prod = bp->rx_pg_prod;
4477 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4478 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4479 break;
4480 prod = NEXT_RX_BD(prod);
4481 ring_prod = RX_PG_RING_IDX(prod);
4482 }
4483 bp->rx_pg_prod = prod;
4484
Michael Chan5d5d0012007-12-12 11:17:43 -08004485 ring_prod = prod = bp->rx_prod;
Michael Chan236b6392006-03-20 17:49:02 -08004486 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004487 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4488 break;
4489 }
4490 prod = NEXT_RX_BD(prod);
4491 ring_prod = RX_RING_IDX(prod);
4492 }
4493 bp->rx_prod = prod;
4494
Michael Chan47bf4242007-12-12 11:19:12 -08004495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
Michael Chanb6016b72005-05-26 13:03:09 -07004496 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4497
4498 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4499}
4500
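/* Work out how many descriptor pages ring_size needs and round that
 * count up to the next power of two; max_size is the largest page count
 * supported for the ring type.  e.g. a size that needs three pages of
 * MAX_RX_DESC_CNT entries is rounded up to four.
 */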
Michael Chan5d5d0012007-12-12 11:17:43 -08004501static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
Michael Chan13daffa2006-03-20 17:49:20 -08004502{
Michael Chan5d5d0012007-12-12 11:17:43 -08004503 u32 max, num_rings = 1;
Michael Chan13daffa2006-03-20 17:49:20 -08004504
Michael Chan5d5d0012007-12-12 11:17:43 -08004505 while (ring_size > MAX_RX_DESC_CNT) {
4506 ring_size -= MAX_RX_DESC_CNT;
Michael Chan13daffa2006-03-20 17:49:20 -08004507 num_rings++;
4508 }
4509 /* round to next power of 2 */
Michael Chan5d5d0012007-12-12 11:17:43 -08004510 max = max_size;
Michael Chan13daffa2006-03-20 17:49:20 -08004511 while ((max & num_rings) == 0)
4512 max >>= 1;
4513
4514 if (num_rings != max)
4515 max <<= 1;
4516
Michael Chan5d5d0012007-12-12 11:17:43 -08004517 return max;
4518}
4519
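/* Size the RX rings for the current MTU.  When a full receive buffer no
 * longer fits in a single page, frames are received into small
 * copy-threshold-sized buffers on the normal ring and the rest of each
 * frame lands in the page ring, which is sized to hold enough pages for
 * 'size' maximum-sized frames.
 */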
4520static void
4521bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4522{
Michael Chan84eaa182007-12-12 11:19:57 -08004523 u32 rx_size, rx_space, jumbo_size;
Michael Chan5d5d0012007-12-12 11:17:43 -08004524
4525 /* 8 for CRC and VLAN */
4526 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4527
Michael Chan84eaa182007-12-12 11:19:57 -08004528 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4529 sizeof(struct skb_shared_info);
4530
Michael Chan5d5d0012007-12-12 11:17:43 -08004531 bp->rx_copy_thresh = RX_COPY_THRESH;
Michael Chan47bf4242007-12-12 11:19:12 -08004532 bp->rx_pg_ring_size = 0;
4533 bp->rx_max_pg_ring = 0;
4534 bp->rx_max_pg_ring_idx = 0;
Michael Chan84eaa182007-12-12 11:19:57 -08004535 if (rx_space > PAGE_SIZE) {
4536 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4537
4538 jumbo_size = size * pages;
4539 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4540 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4541
4542 bp->rx_pg_ring_size = jumbo_size;
4543 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4544 MAX_RX_PG_RINGS);
4545 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4546 rx_size = RX_COPY_THRESH + bp->rx_offset;
4547 bp->rx_copy_thresh = 0;
4548 }
Michael Chan5d5d0012007-12-12 11:17:43 -08004549
4550 bp->rx_buf_use_size = rx_size;
4551 /* hw alignment */
4552 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chan1db82f22007-12-12 11:19:35 -08004553 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
Michael Chan5d5d0012007-12-12 11:17:43 -08004554 bp->rx_ring_size = size;
4555 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
Michael Chan13daffa2006-03-20 17:49:20 -08004556 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4557}
4558
4559static void
Michael Chanb6016b72005-05-26 13:03:09 -07004560bnx2_free_tx_skbs(struct bnx2 *bp)
4561{
4562 int i;
4563
4564 if (bp->tx_buf_ring == NULL)
4565 return;
4566
4567 for (i = 0; i < TX_DESC_CNT; ) {
4568 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4569 struct sk_buff *skb = tx_buf->skb;
4570 int j, last;
4571
4572 if (skb == NULL) {
4573 i++;
4574 continue;
4575 }
4576
4577 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4578 skb_headlen(skb), PCI_DMA_TODEVICE);
4579
4580 tx_buf->skb = NULL;
4581
4582 last = skb_shinfo(skb)->nr_frags;
4583 for (j = 0; j < last; j++) {
4584 tx_buf = &bp->tx_buf_ring[i + j + 1];
4585 pci_unmap_page(bp->pdev,
4586 pci_unmap_addr(tx_buf, mapping),
4587 skb_shinfo(skb)->frags[j].size,
4588 PCI_DMA_TODEVICE);
4589 }
Michael Chan745720e2006-06-29 12:37:41 -07004590 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004591 i += j + 1;
4592 }
4593
4594}
4595
4596static void
4597bnx2_free_rx_skbs(struct bnx2 *bp)
4598{
4599 int i;
4600
4601 if (bp->rx_buf_ring == NULL)
4602 return;
4603
Michael Chan13daffa2006-03-20 17:49:20 -08004604 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004605 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4606 struct sk_buff *skb = rx_buf->skb;
4607
Michael Chan05d0f1c2005-11-04 08:53:48 -08004608 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004609 continue;
4610
4611 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4612 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4613
4614 rx_buf->skb = NULL;
4615
Michael Chan745720e2006-06-29 12:37:41 -07004616 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004617 }
Michael Chan47bf4242007-12-12 11:19:12 -08004618 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4619 bnx2_free_rx_page(bp, i);
Michael Chanb6016b72005-05-26 13:03:09 -07004620}
4621
4622static void
4623bnx2_free_skbs(struct bnx2 *bp)
4624{
4625 bnx2_free_tx_skbs(bp);
4626 bnx2_free_rx_skbs(bp);
4627}
4628
4629static int
4630bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4631{
4632 int rc;
4633
4634 rc = bnx2_reset_chip(bp, reset_code);
4635 bnx2_free_skbs(bp);
4636 if (rc)
4637 return rc;
4638
Michael Chanfba9fe92006-06-12 22:21:25 -07004639 if ((rc = bnx2_init_chip(bp)) != 0)
4640 return rc;
4641
Michael Chanb6016b72005-05-26 13:03:09 -07004642 bnx2_init_tx_ring(bp);
4643 bnx2_init_rx_ring(bp);
4644 return 0;
4645}
4646
4647static int
4648bnx2_init_nic(struct bnx2 *bp)
4649{
4650 int rc;
4651
4652 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4653 return rc;
4654
Michael Chan80be4432006-11-19 14:07:28 -08004655 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004656 bnx2_init_phy(bp);
4657 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004658 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004659 return 0;
4660}
4661
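/* Diagnostic register test.  For each entry in reg_tbl (entries marked
 * BNX2_FL_NOT_5709 are skipped on 5709 chips): save the register, write 0
 * and then 0xffffffff, and check that the read/write bits respond while the
 * read-only bits keep their saved value.  The original value is restored,
 * and -ENODEV is returned on the first mismatch.
 */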
4662static int
4663bnx2_test_registers(struct bnx2 *bp)
4664{
4665 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004666 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004667 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004668 u16 offset;
4669 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004670#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004671 u32 rw_mask;
4672 u32 ro_mask;
4673 } reg_tbl[] = {
4674 { 0x006c, 0, 0x00000000, 0x0000003f },
4675 { 0x0090, 0, 0xffffffff, 0x00000000 },
4676 { 0x0094, 0, 0x00000000, 0x00000000 },
4677
Michael Chan5bae30c2007-05-03 13:18:46 -07004678 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4679 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4680 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4681 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4682 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4683 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4684 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4685 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4686 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004687
Michael Chan5bae30c2007-05-03 13:18:46 -07004688 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4689 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4690 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4691 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4692 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4693 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004694
Michael Chan5bae30c2007-05-03 13:18:46 -07004695 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4696 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4697 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004698
4699 { 0x1000, 0, 0x00000000, 0x00000001 },
4700 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004701
4702 { 0x1408, 0, 0x01c00800, 0x00000000 },
4703 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4704 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004705 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004706 { 0x14b0, 0, 0x00000002, 0x00000001 },
4707 { 0x14b8, 0, 0x00000000, 0x00000000 },
4708 { 0x14c0, 0, 0x00000000, 0x00000009 },
4709 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4710 { 0x14cc, 0, 0x00000000, 0x00000001 },
4711 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004712
4713 { 0x1800, 0, 0x00000000, 0x00000001 },
4714 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004715
4716 { 0x2800, 0, 0x00000000, 0x00000001 },
4717 { 0x2804, 0, 0x00000000, 0x00003f01 },
4718 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4719 { 0x2810, 0, 0xffff0000, 0x00000000 },
4720 { 0x2814, 0, 0xffff0000, 0x00000000 },
4721 { 0x2818, 0, 0xffff0000, 0x00000000 },
4722 { 0x281c, 0, 0xffff0000, 0x00000000 },
4723 { 0x2834, 0, 0xffffffff, 0x00000000 },
4724 { 0x2840, 0, 0x00000000, 0xffffffff },
4725 { 0x2844, 0, 0x00000000, 0xffffffff },
4726 { 0x2848, 0, 0xffffffff, 0x00000000 },
4727 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4728
4729 { 0x2c00, 0, 0x00000000, 0x00000011 },
4730 { 0x2c04, 0, 0x00000000, 0x00030007 },
4731
Michael Chanb6016b72005-05-26 13:03:09 -07004732 { 0x3c00, 0, 0x00000000, 0x00000001 },
4733 { 0x3c04, 0, 0x00000000, 0x00070000 },
4734 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4735 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4736 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4737 { 0x3c14, 0, 0x00000000, 0xffffffff },
4738 { 0x3c18, 0, 0x00000000, 0xffffffff },
4739 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4740 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004741
4742 { 0x5004, 0, 0x00000000, 0x0000007f },
4743 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004744
Michael Chanb6016b72005-05-26 13:03:09 -07004745 { 0x5c00, 0, 0x00000000, 0x00000001 },
4746 { 0x5c04, 0, 0x00000000, 0x0003000f },
4747 { 0x5c08, 0, 0x00000003, 0x00000000 },
4748 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4749 { 0x5c10, 0, 0x00000000, 0xffffffff },
4750 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4751 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4752 { 0x5c88, 0, 0x00000000, 0x00077373 },
4753 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4754
4755 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4756 { 0x680c, 0, 0xffffffff, 0x00000000 },
4757 { 0x6810, 0, 0xffffffff, 0x00000000 },
4758 { 0x6814, 0, 0xffffffff, 0x00000000 },
4759 { 0x6818, 0, 0xffffffff, 0x00000000 },
4760 { 0x681c, 0, 0xffffffff, 0x00000000 },
4761 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4762 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4763 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4764 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4765 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4766 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4767 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4768 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4769 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4770 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4771 { 0x684c, 0, 0xffffffff, 0x00000000 },
4772 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4773 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4774 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4775 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4776 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4777 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4778
4779 { 0xffff, 0, 0x00000000, 0x00000000 },
4780 };
4781
4782 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004783 is_5709 = 0;
4784 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4785 is_5709 = 1;
4786
Michael Chanb6016b72005-05-26 13:03:09 -07004787 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4788 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004789 u16 flags = reg_tbl[i].flags;
4790
4791 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4792 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004793
4794 offset = (u32) reg_tbl[i].offset;
4795 rw_mask = reg_tbl[i].rw_mask;
4796 ro_mask = reg_tbl[i].ro_mask;
4797
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004798 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004799
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004800 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004801
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004802 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004803 if ((val & rw_mask) != 0) {
4804 goto reg_test_err;
4805 }
4806
4807 if ((val & ro_mask) != (save_val & ro_mask)) {
4808 goto reg_test_err;
4809 }
4810
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004811 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004812
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004813 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004814 if ((val & rw_mask) != rw_mask) {
4815 goto reg_test_err;
4816 }
4817
4818 if ((val & ro_mask) != (save_val & ro_mask)) {
4819 goto reg_test_err;
4820 }
4821
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004822 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004823 continue;
4824
4825reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004826 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004827 ret = -ENODEV;
4828 break;
4829 }
4830 return ret;
4831}
4832
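/* Write each of the test patterns to every 32-bit word in the region via
 * indirect register access and verify the readback.  bnx2_test_memory()
 * runs this over a chip-specific table of internal memory blocks.
 */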
4833static int
4834bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4835{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004836 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004837 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4838 int i;
4839
4840 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4841 u32 offset;
4842
4843 for (offset = 0; offset < size; offset += 4) {
4844
4845 REG_WR_IND(bp, start + offset, test_pattern[i]);
4846
4847 if (REG_RD_IND(bp, start + offset) !=
4848 test_pattern[i]) {
4849 return -ENODEV;
4850 }
4851 }
4852 }
4853 return 0;
4854}
4855
4856static int
4857bnx2_test_memory(struct bnx2 *bp)
4858{
4859 int ret = 0;
4860 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004861 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004862 u32 offset;
4863 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004864 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004865 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004866 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004867 { 0xe0000, 0x4000 },
4868 { 0x120000, 0x4000 },
4869 { 0x1a0000, 0x4000 },
4870 { 0x160000, 0x4000 },
4871 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004872 },
4873 mem_tbl_5709[] = {
4874 { 0x60000, 0x4000 },
4875 { 0xa0000, 0x3000 },
4876 { 0xe0000, 0x4000 },
4877 { 0x120000, 0x4000 },
4878 { 0x1a0000, 0x4000 },
4879 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004880 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004881 struct mem_entry *mem_tbl;
4882
4883 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4884 mem_tbl = mem_tbl_5709;
4885 else
4886 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004887
4888 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4889 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4890 mem_tbl[i].len)) != 0) {
4891 return ret;
4892 }
4893 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004894
Michael Chanb6016b72005-05-26 13:03:09 -07004895 return ret;
4896}
4897
Michael Chanbc5a0692006-01-23 16:13:22 -08004898#define BNX2_MAC_LOOPBACK 0
4899#define BNX2_PHY_LOOPBACK 1
4900
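/* Single-packet loopback test.  The MAC or PHY is placed in loopback, a test
 * frame addressed to the NIC's own MAC address is queued on one TX BD, and
 * the host coalescing block is kicked so completions post immediately.  The
 * test passes only if exactly one frame comes back with a clean l2_fhdr
 * status, the expected length and an intact payload.  PHY loopback is
 * skipped (and reported as passing) when a remote PHY is in control.
 */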
Michael Chanb6016b72005-05-26 13:03:09 -07004901static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004902bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004903{
4904 unsigned int pkt_size, num_pkts, i;
4905 struct sk_buff *skb, *rx_skb;
4906 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004907 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004908 dma_addr_t map;
4909 struct tx_bd *txbd;
4910 struct sw_bd *rx_buf;
4911 struct l2_fhdr *rx_hdr;
4912 int ret = -ENODEV;
Michael Chan35efa7c2007-12-20 19:56:37 -08004913 struct bnx2_napi *bnapi = &bp->bnx2_napi;
Michael Chanb6016b72005-05-26 13:03:09 -07004914
Michael Chanbc5a0692006-01-23 16:13:22 -08004915 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4916 bp->loopback = MAC_LOOPBACK;
4917 bnx2_set_mac_loopback(bp);
4918 }
4919 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan489310a2007-10-10 16:16:31 -07004920 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4921 return 0;
4922
Michael Chan80be4432006-11-19 14:07:28 -08004923 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004924 bnx2_set_phy_loopback(bp);
4925 }
4926 else
4927 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004928
Michael Chan84eaa182007-12-12 11:19:57 -08004929 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
Michael Chan932f3772006-08-15 01:39:36 -07004930 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004931 if (!skb)
4932 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004933 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004934 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004935 memset(packet + 6, 0x0, 8);
4936 for (i = 14; i < pkt_size; i++)
4937 packet[i] = (unsigned char) (i & 0xff);
4938
4939 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4940 PCI_DMA_TODEVICE);
4941
Michael Chanbf5295b2006-03-23 01:11:56 -08004942 REG_WR(bp, BNX2_HC_COMMAND,
4943 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4944
Michael Chanb6016b72005-05-26 13:03:09 -07004945 REG_RD(bp, BNX2_HC_COMMAND);
4946
4947 udelay(5);
Michael Chan35efa7c2007-12-20 19:56:37 -08004948 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
Michael Chanb6016b72005-05-26 13:03:09 -07004949
Michael Chanb6016b72005-05-26 13:03:09 -07004950 num_pkts = 0;
4951
Michael Chanbc5a0692006-01-23 16:13:22 -08004952 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004953
4954 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4955 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4956 txbd->tx_bd_mss_nbytes = pkt_size;
4957 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4958
4959 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004960 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4961 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004962
Michael Chan234754d2006-11-19 14:11:41 -08004963 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4964 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004965
4966 udelay(100);
4967
Michael Chanbf5295b2006-03-23 01:11:56 -08004968 REG_WR(bp, BNX2_HC_COMMAND,
4969 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4970
Michael Chanb6016b72005-05-26 13:03:09 -07004971 REG_RD(bp, BNX2_HC_COMMAND);
4972
4973 udelay(5);
4974
4975 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004976 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004977
Michael Chan35efa7c2007-12-20 19:56:37 -08004978 if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
Michael Chanb6016b72005-05-26 13:03:09 -07004979 goto loopback_test_done;
Michael Chanb6016b72005-05-26 13:03:09 -07004980
Michael Chan35efa7c2007-12-20 19:56:37 -08004981 rx_idx = bnx2_get_hw_rx_cons(bnapi);
Michael Chanb6016b72005-05-26 13:03:09 -07004982 if (rx_idx != rx_start_idx + num_pkts) {
4983 goto loopback_test_done;
4984 }
4985
4986 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4987 rx_skb = rx_buf->skb;
4988
4989 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4990 skb_reserve(rx_skb, bp->rx_offset);
4991
4992 pci_dma_sync_single_for_cpu(bp->pdev,
4993 pci_unmap_addr(rx_buf, mapping),
4994 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4995
Michael Chanade2bfe2006-01-23 16:09:51 -08004996 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004997 (L2_FHDR_ERRORS_BAD_CRC |
4998 L2_FHDR_ERRORS_PHY_DECODE |
4999 L2_FHDR_ERRORS_ALIGNMENT |
5000 L2_FHDR_ERRORS_TOO_SHORT |
5001 L2_FHDR_ERRORS_GIANT_FRAME)) {
5002
5003 goto loopback_test_done;
5004 }
5005
5006 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5007 goto loopback_test_done;
5008 }
5009
5010 for (i = 14; i < pkt_size; i++) {
5011 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5012 goto loopback_test_done;
5013 }
5014 }
5015
5016 ret = 0;
5017
5018loopback_test_done:
5019 bp->loopback = 0;
5020 return ret;
5021}
5022
Michael Chanbc5a0692006-01-23 16:13:22 -08005023#define BNX2_MAC_LOOPBACK_FAILED 1
5024#define BNX2_PHY_LOOPBACK_FAILED 2
5025#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5026 BNX2_PHY_LOOPBACK_FAILED)
5027
5028static int
5029bnx2_test_loopback(struct bnx2 *bp)
5030{
5031 int rc = 0;
5032
5033 if (!netif_running(bp->dev))
5034 return BNX2_LOOPBACK_FAILED;
5035
5036 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5037 spin_lock_bh(&bp->phy_lock);
5038 bnx2_init_phy(bp);
5039 spin_unlock_bh(&bp->phy_lock);
5040 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5041 rc |= BNX2_MAC_LOOPBACK_FAILED;
5042 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5043 rc |= BNX2_PHY_LOOPBACK_FAILED;
5044 return rc;
5045}
5046
Michael Chanb6016b72005-05-26 13:03:09 -07005047#define NVRAM_SIZE 0x200
5048#define CRC32_RESIDUAL 0xdebb20e3
5049
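/* NVRAM self-test: check the 0x669955aa signature in the first word, then
 * read 0x200 bytes at offset 0x100 and verify that the CRC32 over each
 * 0x100-byte half leaves the standard residual (0xdebb20e3).
 */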
5050static int
5051bnx2_test_nvram(struct bnx2 *bp)
5052{
5053 u32 buf[NVRAM_SIZE / 4];
5054 u8 *data = (u8 *) buf;
5055 int rc = 0;
5056 u32 magic, csum;
5057
5058 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5059 goto test_nvram_done;
5060
5061 magic = be32_to_cpu(buf[0]);
5062 if (magic != 0x669955aa) {
5063 rc = -ENODEV;
5064 goto test_nvram_done;
5065 }
5066
5067 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5068 goto test_nvram_done;
5069
5070 csum = ether_crc_le(0x100, data);
5071 if (csum != CRC32_RESIDUAL) {
5072 rc = -ENODEV;
5073 goto test_nvram_done;
5074 }
5075
5076 csum = ether_crc_le(0x100, data + 0x100);
5077 if (csum != CRC32_RESIDUAL) {
5078 rc = -ENODEV;
5079 }
5080
5081test_nvram_done:
5082 return rc;
5083}
5084
5085static int
5086bnx2_test_link(struct bnx2 *bp)
5087{
5088 u32 bmsr;
5089
Michael Chan489310a2007-10-10 16:16:31 -07005090 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5091 if (bp->link_up)
5092 return 0;
5093 return -ENODEV;
5094 }
Michael Chanc770a652005-08-25 15:38:39 -07005095 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07005096 bnx2_enable_bmsr1(bp);
5097 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5098 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5099 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07005100 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005101
Michael Chanb6016b72005-05-26 13:03:09 -07005102 if (bmsr & BMSR_LSTATUS) {
5103 return 0;
5104 }
5105 return -ENODEV;
5106}
5107
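/* Interrupt delivery test: remember the current status-block index, ask the
 * host coalescing block to generate an interrupt now, and poll for up to
 * roughly 100 ms for the index in BNX2_PCICFG_INT_ACK_CMD to change.
 * bnx2_open() uses this to confirm that MSI actually works before keeping
 * it enabled.
 */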
5108static int
5109bnx2_test_intr(struct bnx2 *bp)
5110{
5111 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07005112 u16 status_idx;
5113
5114 if (!netif_running(bp->dev))
5115 return -ENODEV;
5116
5117 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5118
5119 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08005120 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07005121 REG_RD(bp, BNX2_HC_COMMAND);
5122
5123 for (i = 0; i < 10; i++) {
5124 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5125 status_idx) {
5126
5127 break;
5128 }
5129
5130 msleep_interruptible(10);
5131 }
5132 if (i < 10)
5133 return 0;
5134
5135 return -ENODEV;
5136}
5137
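/* Periodic 5706S SerDes workaround (parallel detection).  If autoneg is
 * enabled but the link is down and the PHY reports signal detect without
 * receiving config words, force 1000 Mb/s full duplex and set
 * PHY_PARALLEL_DETECT_FLAG.  Once config words are seen again, autoneg is
 * re-enabled.
 */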
5138static void
Michael Chan48b01e22006-11-19 14:08:00 -08005139bnx2_5706_serdes_timer(struct bnx2 *bp)
5140{
5141 spin_lock(&bp->phy_lock);
5142 if (bp->serdes_an_pending)
5143 bp->serdes_an_pending--;
5144 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5145 u32 bmcr;
5146
5147 bp->current_interval = bp->timer_interval;
5148
Michael Chanca58c3a2007-05-03 13:22:52 -07005149 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005150
5151 if (bmcr & BMCR_ANENABLE) {
5152 u32 phy1, phy2;
5153
5154 bnx2_write_phy(bp, 0x1c, 0x7c00);
5155 bnx2_read_phy(bp, 0x1c, &phy1);
5156
5157 bnx2_write_phy(bp, 0x17, 0x0f01);
5158 bnx2_read_phy(bp, 0x15, &phy2);
5159 bnx2_write_phy(bp, 0x17, 0x0f01);
5160 bnx2_read_phy(bp, 0x15, &phy2);
5161
5162 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5163 !(phy2 & 0x20)) { /* no CONFIG */
5164
5165 bmcr &= ~BMCR_ANENABLE;
5166 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07005167 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005168 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5169 }
5170 }
5171 }
5172 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5173 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5174 u32 phy2;
5175
5176 bnx2_write_phy(bp, 0x17, 0x0f01);
5177 bnx2_read_phy(bp, 0x15, &phy2);
5178 if (phy2 & 0x20) {
5179 u32 bmcr;
5180
Michael Chanca58c3a2007-05-03 13:22:52 -07005181 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005182 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07005183 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08005184
5185 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5186 }
5187 } else
5188 bp->current_interval = bp->timer_interval;
5189
5190 spin_unlock(&bp->phy_lock);
5191}
5192
5193static void
Michael Chanf8dd0642006-11-19 14:08:29 -08005194bnx2_5708_serdes_timer(struct bnx2 *bp)
5195{
Michael Chan0d8a6572007-07-07 22:49:43 -07005196 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5197 return;
5198
Michael Chanf8dd0642006-11-19 14:08:29 -08005199 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5200 bp->serdes_an_pending = 0;
5201 return;
5202 }
5203
5204 spin_lock(&bp->phy_lock);
5205 if (bp->serdes_an_pending)
5206 bp->serdes_an_pending--;
5207 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5208 u32 bmcr;
5209
Michael Chanca58c3a2007-05-03 13:22:52 -07005210 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08005211 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07005212 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005213 bp->current_interval = SERDES_FORCED_TIMEOUT;
5214 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07005215 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08005216 bp->serdes_an_pending = 2;
5217 bp->current_interval = bp->timer_interval;
5218 }
5219
5220 } else
5221 bp->current_interval = bp->timer_interval;
5222
5223 spin_unlock(&bp->phy_lock);
5224}
5225
5226static void
Michael Chanb6016b72005-05-26 13:03:09 -07005227bnx2_timer(unsigned long data)
5228{
5229 struct bnx2 *bp = (struct bnx2 *) data;
Michael Chanb6016b72005-05-26 13:03:09 -07005230
Michael Chancd339a02005-08-25 15:35:24 -07005231 if (!netif_running(bp->dev))
5232 return;
5233
Michael Chanb6016b72005-05-26 13:03:09 -07005234 if (atomic_read(&bp->intr_sem) != 0)
5235 goto bnx2_restart_timer;
5236
Michael Chandf149d72007-07-07 22:51:36 -07005237 bnx2_send_heart_beat(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005238
Michael Chancea94db2006-06-12 22:16:13 -07005239 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5240
Michael Chan02537b062007-06-04 21:24:07 -07005241	/* work around occasionally corrupted counters */
5242 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5243 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5244 BNX2_HC_COMMAND_STATS_NOW);
5245
Michael Chanf8dd0642006-11-19 14:08:29 -08005246 if (bp->phy_flags & PHY_SERDES_FLAG) {
5247 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5248 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07005249 else
Michael Chanf8dd0642006-11-19 14:08:29 -08005250 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005251 }
5252
5253bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07005254 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005255}
5256
Michael Chan8e6a72c2007-05-03 13:24:48 -07005257static int
5258bnx2_request_irq(struct bnx2 *bp)
5259{
5260 struct net_device *dev = bp->dev;
Michael Chan6d866ff2007-12-20 19:56:09 -08005261 unsigned long flags;
5262 struct bnx2_irq *irq = &bp->irq_tbl[0];
5263 int rc;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005264
Michael Chan6d866ff2007-12-20 19:56:09 -08005265 if (bp->flags & USING_MSI_FLAG)
5266 flags = 0;
5267 else
5268 flags = IRQF_SHARED;
5269 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005270 return rc;
5271}
5272
5273static void
5274bnx2_free_irq(struct bnx2 *bp)
5275{
5276 struct net_device *dev = bp->dev;
5277
Michael Chan6d866ff2007-12-20 19:56:09 -08005278 free_irq(bp->irq_tbl[0].vector, dev);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005279 if (bp->flags & USING_MSI_FLAG) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07005280 pci_disable_msi(bp->pdev);
5281 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
Michael Chan6d866ff2007-12-20 19:56:09 -08005282 }
5283}
5284
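/* Pick the interrupt handler and vector.  The default is the shared INTx
 * handler; if the device has MSI capability and MSI has not been disabled,
 * enable it and use the MSI handler (the one-shot variant on 5709).
 */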
5285static void
5286bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5287{
5288 bp->irq_tbl[0].handler = bnx2_interrupt;
5289 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5290
5291 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5292 if (pci_enable_msi(bp->pdev) == 0) {
5293 bp->flags |= USING_MSI_FLAG;
5294 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5295 bp->flags |= ONE_SHOT_MSI_FLAG;
5296 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5297 } else
5298 bp->irq_tbl[0].handler = bnx2_msi;
5299 }
5300 }
5301
5302 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005303}
5304
Michael Chanb6016b72005-05-26 13:03:09 -07005305/* Called with rtnl_lock */
5306static int
5307bnx2_open(struct net_device *dev)
5308{
Michael Chan972ec0d2006-01-23 16:12:43 -08005309 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005310 int rc;
5311
Michael Chan1b2f9222007-05-03 13:20:19 -07005312 netif_carrier_off(dev);
5313
Pavel Machek829ca9a2005-09-03 15:56:56 -07005314 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005315 bnx2_disable_int(bp);
5316
5317 rc = bnx2_alloc_mem(bp);
5318 if (rc)
5319 return rc;
5320
Michael Chan6d866ff2007-12-20 19:56:09 -08005321 bnx2_setup_int_mode(bp, disable_msi);
Michael Chan35efa7c2007-12-20 19:56:37 -08005322 bnx2_napi_enable(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005323 rc = bnx2_request_irq(bp);
5324
Michael Chanb6016b72005-05-26 13:03:09 -07005325 if (rc) {
Michael Chan35efa7c2007-12-20 19:56:37 -08005326 bnx2_napi_disable(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005327 bnx2_free_mem(bp);
5328 return rc;
5329 }
5330
5331 rc = bnx2_init_nic(bp);
5332
5333 if (rc) {
Michael Chan35efa7c2007-12-20 19:56:37 -08005334 bnx2_napi_disable(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005335 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005336 bnx2_free_skbs(bp);
5337 bnx2_free_mem(bp);
5338 return rc;
5339 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005340
Michael Chancd339a02005-08-25 15:35:24 -07005341 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005342
5343 atomic_set(&bp->intr_sem, 0);
5344
5345 bnx2_enable_int(bp);
5346
5347 if (bp->flags & USING_MSI_FLAG) {
 5348		/* Test MSI to make sure it is working.
 5349		 * If the MSI test fails, go back to INTx mode.
5350 */
5351 if (bnx2_test_intr(bp) != 0) {
5352 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5353 " using MSI, switching to INTx mode. Please"
5354 " report this failure to the PCI maintainer"
5355 " and include system chipset information.\n",
5356 bp->dev->name);
5357
5358 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005359 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005360
Michael Chan6d866ff2007-12-20 19:56:09 -08005361 bnx2_setup_int_mode(bp, 1);
5362
Michael Chanb6016b72005-05-26 13:03:09 -07005363 rc = bnx2_init_nic(bp);
5364
Michael Chan8e6a72c2007-05-03 13:24:48 -07005365 if (!rc)
5366 rc = bnx2_request_irq(bp);
5367
Michael Chanb6016b72005-05-26 13:03:09 -07005368 if (rc) {
Michael Chan35efa7c2007-12-20 19:56:37 -08005369 bnx2_napi_disable(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005370 bnx2_free_skbs(bp);
5371 bnx2_free_mem(bp);
5372 del_timer_sync(&bp->timer);
5373 return rc;
5374 }
5375 bnx2_enable_int(bp);
5376 }
5377 }
5378 if (bp->flags & USING_MSI_FLAG) {
5379 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5380 }
5381
5382 netif_start_queue(dev);
5383
5384 return 0;
5385}
5386
5387static void
David Howellsc4028952006-11-22 14:57:56 +00005388bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005389{
David Howellsc4028952006-11-22 14:57:56 +00005390 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005391
Michael Chanafdc08b2005-08-25 15:34:29 -07005392 if (!netif_running(bp->dev))
5393 return;
5394
5395 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005396 bnx2_netif_stop(bp);
5397
5398 bnx2_init_nic(bp);
5399
5400 atomic_set(&bp->intr_sem, 1);
5401 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005402 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005403}
5404
5405static void
5406bnx2_tx_timeout(struct net_device *dev)
5407{
Michael Chan972ec0d2006-01-23 16:12:43 -08005408 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005409
 5410	/* This allows the netif to be shut down gracefully before resetting */
5411 schedule_work(&bp->reset_task);
5412}
5413
5414#ifdef BCM_VLAN
5415/* Called with rtnl_lock */
5416static void
5417bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5418{
Michael Chan972ec0d2006-01-23 16:12:43 -08005419 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005420
5421 bnx2_netif_stop(bp);
5422
5423 bp->vlgrp = vlgrp;
5424 bnx2_set_rx_mode(dev);
5425
5426 bnx2_netif_start(bp);
5427}
Michael Chanb6016b72005-05-26 13:03:09 -07005428#endif
5429
Herbert Xu932ff272006-06-09 12:20:56 -07005430/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005431 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5432 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005433 */
5434static int
5435bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5436{
Michael Chan972ec0d2006-01-23 16:12:43 -08005437 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005438 dma_addr_t mapping;
5439 struct tx_bd *txbd;
5440 struct sw_bd *tx_buf;
5441 u32 len, vlan_tag_flags, last_frag, mss;
5442 u16 prod, ring_prod;
5443 int i;
Michael Chana550c992007-12-20 19:56:59 -08005444 struct bnx2_napi *bnapi = &bp->bnx2_napi;
Michael Chanb6016b72005-05-26 13:03:09 -07005445
Michael Chana550c992007-12-20 19:56:59 -08005446 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5447 (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005448 netif_stop_queue(dev);
5449 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5450 dev->name);
5451
5452 return NETDEV_TX_BUSY;
5453 }
5454 len = skb_headlen(skb);
5455 prod = bp->tx_prod;
5456 ring_prod = TX_RING_IDX(prod);
5457
5458 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005459 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005460 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5461 }
5462
5463 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5464 vlan_tag_flags |=
5465 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5466 }
Michael Chanfde82052007-05-03 17:23:35 -07005467 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005468 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005469 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005470
Michael Chanb6016b72005-05-26 13:03:09 -07005471 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5472
Michael Chan4666f872007-05-03 13:22:28 -07005473 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005474
Michael Chan4666f872007-05-03 13:22:28 -07005475 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5476 u32 tcp_off = skb_transport_offset(skb) -
5477 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005478
Michael Chan4666f872007-05-03 13:22:28 -07005479 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5480 TX_BD_FLAGS_SW_FLAGS;
5481 if (likely(tcp_off == 0))
5482 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5483 else {
5484 tcp_off >>= 3;
5485 vlan_tag_flags |= ((tcp_off & 0x3) <<
5486 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5487 ((tcp_off & 0x10) <<
5488 TX_BD_FLAGS_TCP6_OFF4_SHL);
5489 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5490 }
5491 } else {
5492 if (skb_header_cloned(skb) &&
5493 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5494 dev_kfree_skb(skb);
5495 return NETDEV_TX_OK;
5496 }
5497
5498 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5499
5500 iph = ip_hdr(skb);
5501 iph->check = 0;
5502 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5503 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5504 iph->daddr, 0,
5505 IPPROTO_TCP,
5506 0);
5507 if (tcp_opt_len || (iph->ihl > 5)) {
5508 vlan_tag_flags |= ((iph->ihl - 5) +
5509 (tcp_opt_len >> 2)) << 8;
5510 }
Michael Chanb6016b72005-05-26 13:03:09 -07005511 }
Michael Chan4666f872007-05-03 13:22:28 -07005512 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005513 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005514
5515 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005516
Michael Chanb6016b72005-05-26 13:03:09 -07005517 tx_buf = &bp->tx_buf_ring[ring_prod];
5518 tx_buf->skb = skb;
5519 pci_unmap_addr_set(tx_buf, mapping, mapping);
5520
5521 txbd = &bp->tx_desc_ring[ring_prod];
5522
5523 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5524 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5525 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5526 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5527
5528 last_frag = skb_shinfo(skb)->nr_frags;
5529
5530 for (i = 0; i < last_frag; i++) {
5531 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5532
5533 prod = NEXT_TX_BD(prod);
5534 ring_prod = TX_RING_IDX(prod);
5535 txbd = &bp->tx_desc_ring[ring_prod];
5536
5537 len = frag->size;
5538 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5539 len, PCI_DMA_TODEVICE);
5540 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5541 mapping, mapping);
5542
5543 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5544 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5545 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5546 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5547
5548 }
5549 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5550
5551 prod = NEXT_TX_BD(prod);
5552 bp->tx_prod_bseq += skb->len;
5553
Michael Chan234754d2006-11-19 14:11:41 -08005554 REG_WR16(bp, bp->tx_bidx_addr, prod);
5555 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005556
5557 mmiowb();
5558
5559 bp->tx_prod = prod;
5560 dev->trans_start = jiffies;
5561
Michael Chana550c992007-12-20 19:56:59 -08005562 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005563 netif_stop_queue(dev);
Michael Chana550c992007-12-20 19:56:59 -08005564 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005565 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005566 }
5567
5568 return NETDEV_TX_OK;
5569}
5570
5571/* Called with rtnl_lock */
5572static int
5573bnx2_close(struct net_device *dev)
5574{
Michael Chan972ec0d2006-01-23 16:12:43 -08005575 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005576 u32 reset_code;
5577
Michael Chanafdc08b2005-08-25 15:34:29 -07005578 /* Calling flush_scheduled_work() may deadlock because
5579 * linkwatch_event() may be on the workqueue and it will try to get
5580 * the rtnl_lock which we are holding.
5581 */
5582 while (bp->in_reset_task)
5583 msleep(1);
5584
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005585 bnx2_disable_int_sync(bp);
Michael Chan35efa7c2007-12-20 19:56:37 -08005586 bnx2_napi_disable(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005587 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005588 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005589 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005590 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005591 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5592 else
5593 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5594 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005595 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005596 bnx2_free_skbs(bp);
5597 bnx2_free_mem(bp);
5598 bp->link_up = 0;
5599 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005600 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005601 return 0;
5602}
5603
5604#define GET_NET_STATS64(ctr) \
5605 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5606 (unsigned long) (ctr##_lo)
5607
5608#define GET_NET_STATS32(ctr) \
5609 (ctr##_lo)
5610
5611#if (BITS_PER_LONG == 64)
5612#define GET_NET_STATS GET_NET_STATS64
5613#else
5614#define GET_NET_STATS GET_NET_STATS32
5615#endif
5616
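/* Translate the chip's statistics block into net_device_stats.  The hardware
 * counters are kept as 64-bit hi/lo pairs; GET_NET_STATS combines both halves
 * on 64-bit hosts and uses only the low 32 bits on 32-bit hosts.
 */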
5617static struct net_device_stats *
5618bnx2_get_stats(struct net_device *dev)
5619{
Michael Chan972ec0d2006-01-23 16:12:43 -08005620 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005621 struct statistics_block *stats_blk = bp->stats_blk;
5622 struct net_device_stats *net_stats = &bp->net_stats;
5623
5624 if (bp->stats_blk == NULL) {
5625 return net_stats;
5626 }
5627 net_stats->rx_packets =
5628 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5629 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5630 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5631
5632 net_stats->tx_packets =
5633 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5634 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5635 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5636
5637 net_stats->rx_bytes =
5638 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5639
5640 net_stats->tx_bytes =
5641 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5642
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005643 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005644 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5645
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005646 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005647 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5648
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005649 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005650 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5651 stats_blk->stat_EtherStatsOverrsizePkts);
5652
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005653 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005654 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5655
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005656 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005657 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5658
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005659 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005660 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5661
5662 net_stats->rx_errors = net_stats->rx_length_errors +
5663 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5664 net_stats->rx_crc_errors;
5665
5666 net_stats->tx_aborted_errors =
5667 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5668 stats_blk->stat_Dot3StatsLateCollisions);
5669
Michael Chan5b0c76a2005-11-04 08:45:49 -08005670 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5671 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005672 net_stats->tx_carrier_errors = 0;
5673 else {
5674 net_stats->tx_carrier_errors =
5675 (unsigned long)
5676 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5677 }
5678
5679 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005680 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005681 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5682 +
5683 net_stats->tx_aborted_errors +
5684 net_stats->tx_carrier_errors;
5685
Michael Chancea94db2006-06-12 22:16:13 -07005686 net_stats->rx_missed_errors =
5687 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5688 stats_blk->stat_FwRxDrop);
5689
Michael Chanb6016b72005-05-26 13:03:09 -07005690 return net_stats;
5691}
5692
5693/* All ethtool functions called with rtnl_lock */
5694
5695static int
5696bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5697{
Michael Chan972ec0d2006-01-23 16:12:43 -08005698 struct bnx2 *bp = netdev_priv(dev);
Michael Chan7b6b8342007-07-07 22:50:15 -07005699 int support_serdes = 0, support_copper = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005700
5701 cmd->supported = SUPPORTED_Autoneg;
Michael Chan7b6b8342007-07-07 22:50:15 -07005702 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5703 support_serdes = 1;
5704 support_copper = 1;
5705 } else if (bp->phy_port == PORT_FIBRE)
5706 support_serdes = 1;
5707 else
5708 support_copper = 1;
5709
5710 if (support_serdes) {
Michael Chanb6016b72005-05-26 13:03:09 -07005711 cmd->supported |= SUPPORTED_1000baseT_Full |
5712 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005713 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5714 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005715
Michael Chanb6016b72005-05-26 13:03:09 -07005716 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005717 if (support_copper) {
Michael Chanb6016b72005-05-26 13:03:09 -07005718 cmd->supported |= SUPPORTED_10baseT_Half |
5719 SUPPORTED_10baseT_Full |
5720 SUPPORTED_100baseT_Half |
5721 SUPPORTED_100baseT_Full |
5722 SUPPORTED_1000baseT_Full |
5723 SUPPORTED_TP;
5724
Michael Chanb6016b72005-05-26 13:03:09 -07005725 }
5726
Michael Chan7b6b8342007-07-07 22:50:15 -07005727 spin_lock_bh(&bp->phy_lock);
5728 cmd->port = bp->phy_port;
Michael Chanb6016b72005-05-26 13:03:09 -07005729 cmd->advertising = bp->advertising;
5730
5731 if (bp->autoneg & AUTONEG_SPEED) {
5732 cmd->autoneg = AUTONEG_ENABLE;
5733 }
5734 else {
5735 cmd->autoneg = AUTONEG_DISABLE;
5736 }
5737
5738 if (netif_carrier_ok(dev)) {
5739 cmd->speed = bp->line_speed;
5740 cmd->duplex = bp->duplex;
5741 }
5742 else {
5743 cmd->speed = -1;
5744 cmd->duplex = -1;
5745 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005746 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005747
5748 cmd->transceiver = XCVR_INTERNAL;
5749 cmd->phy_address = bp->phy_addr;
5750
5751 return 0;
5752}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005753
Michael Chanb6016b72005-05-26 13:03:09 -07005754static int
5755bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5756{
Michael Chan972ec0d2006-01-23 16:12:43 -08005757 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005758 u8 autoneg = bp->autoneg;
5759 u8 req_duplex = bp->req_duplex;
5760 u16 req_line_speed = bp->req_line_speed;
5761 u32 advertising = bp->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005762 int err = -EINVAL;
5763
5764 spin_lock_bh(&bp->phy_lock);
5765
5766 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5767 goto err_out_unlock;
5768
5769 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5770 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005771
5772 if (cmd->autoneg == AUTONEG_ENABLE) {
5773 autoneg |= AUTONEG_SPEED;
5774
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005775 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005776
5777 /* allow advertising 1 speed */
5778 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5779 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5780 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5781 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5782
Michael Chan7b6b8342007-07-07 22:50:15 -07005783 if (cmd->port == PORT_FIBRE)
5784 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005785
5786 advertising = cmd->advertising;
5787
Michael Chan27a005b2007-05-03 13:23:41 -07005788 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
Michael Chan7b6b8342007-07-07 22:50:15 -07005789 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5790 (cmd->port == PORT_TP))
5791 goto err_out_unlock;
5792 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07005793 advertising = cmd->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005794 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5795 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005796 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005797 if (cmd->port == PORT_FIBRE)
Michael Chanb6016b72005-05-26 13:03:09 -07005798 advertising = ETHTOOL_ALL_FIBRE_SPEED;
Michael Chan7b6b8342007-07-07 22:50:15 -07005799 else
Michael Chanb6016b72005-05-26 13:03:09 -07005800 advertising = ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005801 }
5802 advertising |= ADVERTISED_Autoneg;
5803 }
5804 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005805 if (cmd->port == PORT_FIBRE) {
Michael Chan80be4432006-11-19 14:07:28 -08005806 if ((cmd->speed != SPEED_1000 &&
5807 cmd->speed != SPEED_2500) ||
5808 (cmd->duplex != DUPLEX_FULL))
Michael Chan7b6b8342007-07-07 22:50:15 -07005809 goto err_out_unlock;
Michael Chan80be4432006-11-19 14:07:28 -08005810
5811 if (cmd->speed == SPEED_2500 &&
5812 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
Michael Chan7b6b8342007-07-07 22:50:15 -07005813 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005814 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005815 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5816 goto err_out_unlock;
5817
Michael Chanb6016b72005-05-26 13:03:09 -07005818 autoneg &= ~AUTONEG_SPEED;
5819 req_line_speed = cmd->speed;
5820 req_duplex = cmd->duplex;
5821 advertising = 0;
5822 }
5823
5824 bp->autoneg = autoneg;
5825 bp->advertising = advertising;
5826 bp->req_line_speed = req_line_speed;
5827 bp->req_duplex = req_duplex;
5828
Michael Chan7b6b8342007-07-07 22:50:15 -07005829 err = bnx2_setup_phy(bp, cmd->port);
Michael Chanb6016b72005-05-26 13:03:09 -07005830
Michael Chan7b6b8342007-07-07 22:50:15 -07005831err_out_unlock:
Michael Chanc770a652005-08-25 15:38:39 -07005832 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005833
Michael Chan7b6b8342007-07-07 22:50:15 -07005834 return err;
Michael Chanb6016b72005-05-26 13:03:09 -07005835}
5836
5837static void
5838bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5839{
Michael Chan972ec0d2006-01-23 16:12:43 -08005840 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005841
5842 strcpy(info->driver, DRV_MODULE_NAME);
5843 strcpy(info->version, DRV_MODULE_VERSION);
5844 strcpy(info->bus_info, pci_name(bp->pdev));
Michael Chan58fc2ea2007-07-07 22:52:02 -07005845 strcpy(info->fw_version, bp->fw_version);
Michael Chanb6016b72005-05-26 13:03:09 -07005846}
5847
Michael Chan244ac4f2006-03-20 17:48:46 -08005848#define BNX2_REGDUMP_LEN (32 * 1024)
5849
5850static int
5851bnx2_get_regs_len(struct net_device *dev)
5852{
5853 return BNX2_REGDUMP_LEN;
5854}
5855
5856static void
5857bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5858{
5859 u32 *p = _p, i, offset;
5860 u8 *orig_p = _p;
5861 struct bnx2 *bp = netdev_priv(dev);
5862 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5863 0x0800, 0x0880, 0x0c00, 0x0c10,
5864 0x0c30, 0x0d08, 0x1000, 0x101c,
5865 0x1040, 0x1048, 0x1080, 0x10a4,
5866 0x1400, 0x1490, 0x1498, 0x14f0,
5867 0x1500, 0x155c, 0x1580, 0x15dc,
5868 0x1600, 0x1658, 0x1680, 0x16d8,
5869 0x1800, 0x1820, 0x1840, 0x1854,
5870 0x1880, 0x1894, 0x1900, 0x1984,
5871 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5872 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5873 0x2000, 0x2030, 0x23c0, 0x2400,
5874 0x2800, 0x2820, 0x2830, 0x2850,
5875 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5876 0x3c00, 0x3c94, 0x4000, 0x4010,
5877 0x4080, 0x4090, 0x43c0, 0x4458,
5878 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5879 0x4fc0, 0x5010, 0x53c0, 0x5444,
5880 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5881 0x5fc0, 0x6000, 0x6400, 0x6428,
5882 0x6800, 0x6848, 0x684c, 0x6860,
5883 0x6888, 0x6910, 0x8000 };
5884
5885 regs->version = 0;
5886
5887 memset(p, 0, BNX2_REGDUMP_LEN);
5888
5889 if (!netif_running(bp->dev))
5890 return;
5891
5892 i = 0;
5893 offset = reg_boundaries[0];
5894 p += offset;
5895 while (offset < BNX2_REGDUMP_LEN) {
5896 *p++ = REG_RD(bp, offset);
5897 offset += 4;
5898 if (offset == reg_boundaries[i + 1]) {
5899 offset = reg_boundaries[i + 2];
5900 p = (u32 *) (orig_p + offset);
5901 i += 2;
5902 }
5903 }
5904}
5905
Michael Chanb6016b72005-05-26 13:03:09 -07005906static void
5907bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5908{
Michael Chan972ec0d2006-01-23 16:12:43 -08005909 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005910
5911 if (bp->flags & NO_WOL_FLAG) {
5912 wol->supported = 0;
5913 wol->wolopts = 0;
5914 }
5915 else {
5916 wol->supported = WAKE_MAGIC;
5917 if (bp->wol)
5918 wol->wolopts = WAKE_MAGIC;
5919 else
5920 wol->wolopts = 0;
5921 }
5922 memset(&wol->sopass, 0, sizeof(wol->sopass));
5923}
5924
5925static int
5926bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5927{
Michael Chan972ec0d2006-01-23 16:12:43 -08005928 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005929
5930 if (wol->wolopts & ~WAKE_MAGIC)
5931 return -EINVAL;
5932
5933 if (wol->wolopts & WAKE_MAGIC) {
5934 if (bp->flags & NO_WOL_FLAG)
5935 return -EINVAL;
5936
5937 bp->wol = 1;
5938 }
5939 else {
5940 bp->wol = 0;
5941 }
5942 return 0;
5943}
5944
5945static int
5946bnx2_nway_reset(struct net_device *dev)
5947{
Michael Chan972ec0d2006-01-23 16:12:43 -08005948 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005949 u32 bmcr;
5950
5951 if (!(bp->autoneg & AUTONEG_SPEED)) {
5952 return -EINVAL;
5953 }
5954
Michael Chanc770a652005-08-25 15:38:39 -07005955 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005956
Michael Chan7b6b8342007-07-07 22:50:15 -07005957 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5958 int rc;
5959
5960 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5961 spin_unlock_bh(&bp->phy_lock);
5962 return rc;
5963 }
5964
Michael Chanb6016b72005-05-26 13:03:09 -07005965	/* Force a link-down event that is visible to the link partner */
5966 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005967 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005968 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005969
5970 msleep(20);
5971
Michael Chanc770a652005-08-25 15:38:39 -07005972 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005973
5974 bp->current_interval = SERDES_AN_TIMEOUT;
5975 bp->serdes_an_pending = 1;
5976 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005977 }
5978
Michael Chanca58c3a2007-05-03 13:22:52 -07005979 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005980 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005981 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005982
Michael Chanc770a652005-08-25 15:38:39 -07005983 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005984
5985 return 0;
5986}
5987
5988static int
5989bnx2_get_eeprom_len(struct net_device *dev)
5990{
Michael Chan972ec0d2006-01-23 16:12:43 -08005991 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005992
Michael Chan1122db72006-01-23 16:11:42 -08005993 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005994 return 0;
5995
Michael Chan1122db72006-01-23 16:11:42 -08005996 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005997}
5998
5999static int
6000bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6001 u8 *eebuf)
6002{
Michael Chan972ec0d2006-01-23 16:12:43 -08006003 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006004 int rc;
6005
John W. Linville1064e942005-11-10 12:58:24 -08006006 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07006007
6008 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6009
6010 return rc;
6011}
6012
6013static int
6014bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6015 u8 *eebuf)
6016{
Michael Chan972ec0d2006-01-23 16:12:43 -08006017 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006018 int rc;
6019
John W. Linville1064e942005-11-10 12:58:24 -08006020 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07006021
6022 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6023
6024 return rc;
6025}
6026
6027static int
6028bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6029{
Michael Chan972ec0d2006-01-23 16:12:43 -08006030 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006031
6032 memset(coal, 0, sizeof(struct ethtool_coalesce));
6033
6034 coal->rx_coalesce_usecs = bp->rx_ticks;
6035 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6036 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6037 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6038
6039 coal->tx_coalesce_usecs = bp->tx_ticks;
6040 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6041 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6042 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6043
6044 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6045
6046 return 0;
6047}
6048
6049static int
6050bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6051{
Michael Chan972ec0d2006-01-23 16:12:43 -08006052 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006053
6054 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6055 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6056
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006057 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07006058 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6059
6060 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6061 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6062
6063 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6064 if (bp->rx_quick_cons_trip_int > 0xff)
6065 bp->rx_quick_cons_trip_int = 0xff;
6066
6067 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6068 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6069
6070 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6071 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6072
6073 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6074 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6075
6076 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6077 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6078 0xff;
6079
6080 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07006081 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6082 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6083 bp->stats_ticks = USEC_PER_SEC;
6084 }
Michael Chan7ea69202007-07-16 18:27:10 -07006085 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6086 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6087 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006088
6089 if (netif_running(bp->dev)) {
6090 bnx2_netif_stop(bp);
6091 bnx2_init_nic(bp);
6092 bnx2_netif_start(bp);
6093 }
6094
6095 return 0;
6096}
6097
6098static void
6099bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6100{
Michael Chan972ec0d2006-01-23 16:12:43 -08006101 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006102
Michael Chan13daffa2006-03-20 17:49:20 -08006103 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006104 ering->rx_mini_max_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006105 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07006106
6107 ering->rx_pending = bp->rx_ring_size;
6108 ering->rx_mini_pending = 0;
Michael Chan47bf4242007-12-12 11:19:12 -08006109 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
Michael Chanb6016b72005-05-26 13:03:09 -07006110
6111 ering->tx_max_pending = MAX_TX_DESC_CNT;
6112 ering->tx_pending = bp->tx_ring_size;
6113}
6114
6115static int
Michael Chan5d5d0012007-12-12 11:17:43 -08006116bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
Michael Chanb6016b72005-05-26 13:03:09 -07006117{
Michael Chan13daffa2006-03-20 17:49:20 -08006118 if (netif_running(bp->dev)) {
6119 bnx2_netif_stop(bp);
6120 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6121 bnx2_free_skbs(bp);
6122 bnx2_free_mem(bp);
6123 }
6124
Michael Chan5d5d0012007-12-12 11:17:43 -08006125 bnx2_set_rx_ring_size(bp, rx);
6126 bp->tx_ring_size = tx;
Michael Chanb6016b72005-05-26 13:03:09 -07006127
6128 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08006129 int rc;
6130
6131 rc = bnx2_alloc_mem(bp);
6132 if (rc)
6133 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07006134 bnx2_init_nic(bp);
6135 bnx2_netif_start(bp);
6136 }
Michael Chanb6016b72005-05-26 13:03:09 -07006137 return 0;
6138}
6139
Michael Chan5d5d0012007-12-12 11:17:43 -08006140static int
6141bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6142{
6143 struct bnx2 *bp = netdev_priv(dev);
6144 int rc;
6145
6146 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6147 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6148 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6149
6150 return -EINVAL;
6151 }
6152 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6153 return rc;
6154}
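
/* Usage sketch (interface name assumed): "ethtool -g eth0" reports the ring
 * limits above and "ethtool -G eth0 rx N tx N" resizes them.  On a running
 * interface bnx2_change_ring_size() frees and reallocates the rings, so
 * traffic is briefly interrupted.
 */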
6155
Michael Chanb6016b72005-05-26 13:03:09 -07006156static void
6157bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6158{
Michael Chan972ec0d2006-01-23 16:12:43 -08006159 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006160
6161 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6162 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6163 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6164}
6165
6166static int
6167bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6168{
Michael Chan972ec0d2006-01-23 16:12:43 -08006169 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006170
6171 bp->req_flow_ctrl = 0;
6172 if (epause->rx_pause)
6173 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6174 if (epause->tx_pause)
6175 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6176
6177 if (epause->autoneg) {
6178 bp->autoneg |= AUTONEG_FLOW_CTRL;
6179 }
6180 else {
6181 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6182 }
6183
Michael Chanc770a652005-08-25 15:38:39 -07006184 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006185
Michael Chan0d8a6572007-07-07 22:49:43 -07006186 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07006187
Michael Chanc770a652005-08-25 15:38:39 -07006188 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006189
6190 return 0;
6191}
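
/* Usage sketch (interface name assumed): "ethtool -A eth0 autoneg on rx on
 * tx on" lands here; the new flow-control request is applied by re-running
 * bnx2_setup_phy() for the current port while holding phy_lock.
 */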
6192
6193static u32
6194bnx2_get_rx_csum(struct net_device *dev)
6195{
Michael Chan972ec0d2006-01-23 16:12:43 -08006196 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006197
6198 return bp->rx_csum;
6199}
6200
6201static int
6202bnx2_set_rx_csum(struct net_device *dev, u32 data)
6203{
Michael Chan972ec0d2006-01-23 16:12:43 -08006204 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006205
6206 bp->rx_csum = data;
6207 return 0;
6208}
6209
Michael Chanb11d6212006-06-29 12:31:21 -07006210static int
6211bnx2_set_tso(struct net_device *dev, u32 data)
6212{
Michael Chan4666f872007-05-03 13:22:28 -07006213 struct bnx2 *bp = netdev_priv(dev);
6214
6215 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07006216 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006217 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6218 dev->features |= NETIF_F_TSO6;
6219 } else
6220 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6221 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07006222 return 0;
6223}
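
/* Usage sketch (interface name assumed): "ethtool -K eth0 tso on" toggles
 * the TSO feature flags here; TSO over IPv6 is only offered on 5709 chips,
 * matching the feature setup in bnx2_init_one().
 */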
6224
Michael Chancea94db2006-06-12 22:16:13 -07006225#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07006226
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006227static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006228 char string[ETH_GSTRING_LEN];
6229} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6230 { "rx_bytes" },
6231 { "rx_error_bytes" },
6232 { "tx_bytes" },
6233 { "tx_error_bytes" },
6234 { "rx_ucast_packets" },
6235 { "rx_mcast_packets" },
6236 { "rx_bcast_packets" },
6237 { "tx_ucast_packets" },
6238 { "tx_mcast_packets" },
6239 { "tx_bcast_packets" },
6240 { "tx_mac_errors" },
6241 { "tx_carrier_errors" },
6242 { "rx_crc_errors" },
6243 { "rx_align_errors" },
6244 { "tx_single_collisions" },
6245 { "tx_multi_collisions" },
6246 { "tx_deferred" },
6247 { "tx_excess_collisions" },
6248 { "tx_late_collisions" },
6249 { "tx_total_collisions" },
6250 { "rx_fragments" },
6251 { "rx_jabbers" },
6252 { "rx_undersize_packets" },
6253 { "rx_oversize_packets" },
6254 { "rx_64_byte_packets" },
6255 { "rx_65_to_127_byte_packets" },
6256 { "rx_128_to_255_byte_packets" },
6257 { "rx_256_to_511_byte_packets" },
6258 { "rx_512_to_1023_byte_packets" },
6259 { "rx_1024_to_1522_byte_packets" },
6260 { "rx_1523_to_9022_byte_packets" },
6261 { "tx_64_byte_packets" },
6262 { "tx_65_to_127_byte_packets" },
6263 { "tx_128_to_255_byte_packets" },
6264 { "tx_256_to_511_byte_packets" },
6265 { "tx_512_to_1023_byte_packets" },
6266 { "tx_1024_to_1522_byte_packets" },
6267 { "tx_1523_to_9022_byte_packets" },
6268 { "rx_xon_frames" },
6269 { "rx_xoff_frames" },
6270 { "tx_xon_frames" },
6271 { "tx_xoff_frames" },
6272 { "rx_mac_ctrl_frames" },
6273 { "rx_filtered_packets" },
6274 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07006275 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07006276};
6277
6278#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6279
Arjan van de Venf71e1302006-03-03 21:33:57 -05006280static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006281 STATS_OFFSET32(stat_IfHCInOctets_hi),
6282 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6283 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6284 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6285 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6286 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6287 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6288 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6289 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6290 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6291 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006292 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6293 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6294 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6295 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6296 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6297 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6298 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6299 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6300 STATS_OFFSET32(stat_EtherStatsCollisions),
6301 STATS_OFFSET32(stat_EtherStatsFragments),
6302 STATS_OFFSET32(stat_EtherStatsJabbers),
6303 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6304 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6305 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6306 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6307 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6308 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6309 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6310 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6311 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6312 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6313 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6314 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6315 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6316 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6317 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6318 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6319 STATS_OFFSET32(stat_XonPauseFramesReceived),
6320 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6321 STATS_OFFSET32(stat_OutXonSent),
6322 STATS_OFFSET32(stat_OutXoffSent),
6323 STATS_OFFSET32(stat_MacControlFramesReceived),
6324 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6325 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07006326 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07006327};
6328
6329/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6330 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006331 */
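/* Counter width table used by bnx2_get_ethtool_stats(): 8 = 64-bit counter
 * read as a hi/lo pair, 4 = 32-bit counter, 0 = counter skipped on that
 * chip revision.
 */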
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006332static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006333 8,0,8,8,8,8,8,8,8,8,
6334 4,0,4,4,4,4,4,4,4,4,
6335 4,4,4,4,4,4,4,4,4,4,
6336 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006337 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07006338};
6339
Michael Chan5b0c76a2005-11-04 08:45:49 -08006340static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6341 8,0,8,8,8,8,8,8,8,8,
6342 4,4,4,4,4,4,4,4,4,4,
6343 4,4,4,4,4,4,4,4,4,4,
6344 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006345 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08006346};
6347
Michael Chanb6016b72005-05-26 13:03:09 -07006348#define BNX2_NUM_TESTS 6
6349
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006350static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006351 char string[ETH_GSTRING_LEN];
6352} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6353 { "register_test (offline)" },
6354 { "memory_test (offline)" },
6355 { "loopback_test (offline)" },
6356 { "nvram_test (online)" },
6357 { "interrupt_test (online)" },
6358 { "link_test (online)" },
6359};
6360
6361static int
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006362bnx2_get_sset_count(struct net_device *dev, int sset)
Michael Chanb6016b72005-05-26 13:03:09 -07006363{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006364 switch (sset) {
6365 case ETH_SS_TEST:
6366 return BNX2_NUM_TESTS;
6367 case ETH_SS_STATS:
6368 return BNX2_NUM_STATS;
6369 default:
6370 return -EOPNOTSUPP;
6371 }
Michael Chanb6016b72005-05-26 13:03:09 -07006372}
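
/* The ethtool core uses get_sset_count() to size the buffers it hands to
 * get_strings(), self_test() and get_ethtool_stats(), so these counts must
 * stay in sync with bnx2_tests_str_arr[] and bnx2_stats_str_arr[].
 */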
6373
6374static void
6375bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6376{
Michael Chan972ec0d2006-01-23 16:12:43 -08006377 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006378
6379 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6380 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006381 int i;
6382
Michael Chanb6016b72005-05-26 13:03:09 -07006383 bnx2_netif_stop(bp);
6384 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6385 bnx2_free_skbs(bp);
6386
6387 if (bnx2_test_registers(bp) != 0) {
6388 buf[0] = 1;
6389 etest->flags |= ETH_TEST_FL_FAILED;
6390 }
6391 if (bnx2_test_memory(bp) != 0) {
6392 buf[1] = 1;
6393 etest->flags |= ETH_TEST_FL_FAILED;
6394 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006395 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006396 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006397
6398 if (!netif_running(bp->dev)) {
6399 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6400 }
6401 else {
6402 bnx2_init_nic(bp);
6403 bnx2_netif_start(bp);
6404 }
6405
6406 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006407 for (i = 0; i < 7; i++) {
6408 if (bp->link_up)
6409 break;
6410 msleep_interruptible(1000);
6411 }
Michael Chanb6016b72005-05-26 13:03:09 -07006412 }
6413
6414 if (bnx2_test_nvram(bp) != 0) {
6415 buf[3] = 1;
6416 etest->flags |= ETH_TEST_FL_FAILED;
6417 }
6418 if (bnx2_test_intr(bp) != 0) {
6419 buf[4] = 1;
6420 etest->flags |= ETH_TEST_FL_FAILED;
6421 }
6422
6423 if (bnx2_test_link(bp) != 0) {
6424 buf[5] = 1;
6425 etest->flags |= ETH_TEST_FL_FAILED;
6426
6427 }
6428}
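
/* Usage sketch (interface name assumed): "ethtool -t eth0 offline" runs all
 * six tests above (the register, memory and loopback tests reset the chip
 * and briefly take the interface down), while "ethtool -t eth0 online" runs
 * only the NVRAM, interrupt and link tests.
 */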
6429
6430static void
6431bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6432{
6433 switch (stringset) {
6434 case ETH_SS_STATS:
6435 memcpy(buf, bnx2_stats_str_arr,
6436 sizeof(bnx2_stats_str_arr));
6437 break;
6438 case ETH_SS_TEST:
6439 memcpy(buf, bnx2_tests_str_arr,
6440 sizeof(bnx2_tests_str_arr));
6441 break;
6442 }
6443}
6444
Michael Chanb6016b72005-05-26 13:03:09 -07006445static void
6446bnx2_get_ethtool_stats(struct net_device *dev,
6447 struct ethtool_stats *stats, u64 *buf)
6448{
Michael Chan972ec0d2006-01-23 16:12:43 -08006449 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006450 int i;
6451 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006452 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006453
6454 if (hw_stats == NULL) {
6455 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6456 return;
6457 }
6458
Michael Chan5b0c76a2005-11-04 08:45:49 -08006459 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6460 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6461 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6462 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006463 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006464 else
6465 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006466
6467 for (i = 0; i < BNX2_NUM_STATS; i++) {
6468 if (stats_len_arr[i] == 0) {
6469 /* skip this counter */
6470 buf[i] = 0;
6471 continue;
6472 }
6473 if (stats_len_arr[i] == 4) {
6474 /* 4-byte counter */
6475 buf[i] = (u64)
6476 *(hw_stats + bnx2_stats_offset_arr[i]);
6477 continue;
6478 }
6479 /* 8-byte counter */
6480 buf[i] = (((u64) *(hw_stats +
6481 bnx2_stats_offset_arr[i])) << 32) +
6482 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6483 }
6484}
6485
6486static int
6487bnx2_phys_id(struct net_device *dev, u32 data)
6488{
Michael Chan972ec0d2006-01-23 16:12:43 -08006489 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006490 int i;
6491 u32 save;
6492
6493 if (data == 0)
6494 data = 2;
6495
6496 save = REG_RD(bp, BNX2_MISC_CFG);
6497 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6498
6499 for (i = 0; i < (data * 2); i++) {
6500 if ((i % 2) == 0) {
6501 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6502 }
6503 else {
6504 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6505 BNX2_EMAC_LED_1000MB_OVERRIDE |
6506 BNX2_EMAC_LED_100MB_OVERRIDE |
6507 BNX2_EMAC_LED_10MB_OVERRIDE |
6508 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6509 BNX2_EMAC_LED_TRAFFIC);
6510 }
6511 msleep_interruptible(500);
6512 if (signal_pending(current))
6513 break;
6514 }
6515 REG_WR(bp, BNX2_EMAC_LED, 0);
6516 REG_WR(bp, BNX2_MISC_CFG, save);
6517 return 0;
6518}
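
/* Usage sketch (interface name assumed): "ethtool -p eth0 5" blinks the port
 * LED for roughly five seconds; each loop iteration above holds the LED
 * override for 500 ms, so 'data' works out to seconds of blinking, with a
 * two second default when no duration is given.
 */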
6519
Michael Chan4666f872007-05-03 13:22:28 -07006520static int
6521bnx2_set_tx_csum(struct net_device *dev, u32 data)
6522{
6523 struct bnx2 *bp = netdev_priv(dev);
6524
6525 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Michael Chan6460d942007-07-14 19:07:52 -07006526 return (ethtool_op_set_tx_ipv6_csum(dev, data));
Michael Chan4666f872007-05-03 13:22:28 -07006527 else
6528 return (ethtool_op_set_tx_csum(dev, data));
6529}
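
/* Usage sketch (interface name assumed): "ethtool -K eth0 tx on" ends up
 * here.  On 5709 chips the IPv6 checksum feature bit is toggled along with
 * the IPv4 one via ethtool_op_set_tx_ipv6_csum(); older chips only offload
 * IPv4 checksums.
 */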
6530
Jeff Garzik7282d492006-09-13 14:30:00 -04006531static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006532 .get_settings = bnx2_get_settings,
6533 .set_settings = bnx2_set_settings,
6534 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006535 .get_regs_len = bnx2_get_regs_len,
6536 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006537 .get_wol = bnx2_get_wol,
6538 .set_wol = bnx2_set_wol,
6539 .nway_reset = bnx2_nway_reset,
6540 .get_link = ethtool_op_get_link,
6541 .get_eeprom_len = bnx2_get_eeprom_len,
6542 .get_eeprom = bnx2_get_eeprom,
6543 .set_eeprom = bnx2_set_eeprom,
6544 .get_coalesce = bnx2_get_coalesce,
6545 .set_coalesce = bnx2_set_coalesce,
6546 .get_ringparam = bnx2_get_ringparam,
6547 .set_ringparam = bnx2_set_ringparam,
6548 .get_pauseparam = bnx2_get_pauseparam,
6549 .set_pauseparam = bnx2_set_pauseparam,
6550 .get_rx_csum = bnx2_get_rx_csum,
6551 .set_rx_csum = bnx2_set_rx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006552 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006553 .set_sg = ethtool_op_set_sg,
Michael Chanb11d6212006-06-29 12:31:21 -07006554 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006555 .self_test = bnx2_self_test,
6556 .get_strings = bnx2_get_strings,
6557 .phys_id = bnx2_phys_id,
Michael Chanb6016b72005-05-26 13:03:09 -07006558 .get_ethtool_stats = bnx2_get_ethtool_stats,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07006559 .get_sset_count = bnx2_get_sset_count,
Michael Chanb6016b72005-05-26 13:03:09 -07006560};
6561
6562/* Called with rtnl_lock */
6563static int
6564bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6565{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006566 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006567 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006568 int err;
6569
6570 switch(cmd) {
6571 case SIOCGMIIPHY:
6572 data->phy_id = bp->phy_addr;
6573
6574 /* fallthru */
6575 case SIOCGMIIREG: {
6576 u32 mii_regval;
6577
Michael Chan7b6b8342007-07-07 22:50:15 -07006578 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6579 return -EOPNOTSUPP;
6580
Michael Chandad3e452007-05-03 13:18:03 -07006581 if (!netif_running(dev))
6582 return -EAGAIN;
6583
Michael Chanc770a652005-08-25 15:38:39 -07006584 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006585 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006586 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006587
6588 data->val_out = mii_regval;
6589
6590 return err;
6591 }
6592
6593 case SIOCSMIIREG:
6594 if (!capable(CAP_NET_ADMIN))
6595 return -EPERM;
6596
Michael Chan7b6b8342007-07-07 22:50:15 -07006597 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6598 return -EOPNOTSUPP;
6599
Michael Chandad3e452007-05-03 13:18:03 -07006600 if (!netif_running(dev))
6601 return -EAGAIN;
6602
Michael Chanc770a652005-08-25 15:38:39 -07006603 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006604 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006605 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006606
6607 return err;
6608
6609 default:
6610 /* do nothing */
6611 break;
6612 }
6613 return -EOPNOTSUPP;
6614}
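
/* Note: these MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) are what
 * legacy tools such as mii-tool issue.  They are rejected when the PHY is
 * managed by remote firmware (REMOTE_PHY_CAP_FLAG) and return -EAGAIN while
 * the device is down, since the MDIO interface is only usable on a running
 * NIC.
 */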
6615
6616/* Called with rtnl_lock */
6617static int
6618bnx2_change_mac_addr(struct net_device *dev, void *p)
6619{
6620 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006621 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006622
Michael Chan73eef4c2005-08-25 15:39:15 -07006623 if (!is_valid_ether_addr(addr->sa_data))
6624 return -EINVAL;
6625
Michael Chanb6016b72005-05-26 13:03:09 -07006626 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6627 if (netif_running(dev))
6628 bnx2_set_mac_addr(bp);
6629
6630 return 0;
6631}
6632
6633/* Called with rtnl_lock */
6634static int
6635bnx2_change_mtu(struct net_device *dev, int new_mtu)
6636{
Michael Chan972ec0d2006-01-23 16:12:43 -08006637 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006638
6639 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6640 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6641 return -EINVAL;
6642
6643 dev->mtu = new_mtu;
Michael Chan5d5d0012007-12-12 11:17:43 -08006644 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
Michael Chanb6016b72005-05-26 13:03:09 -07006645}
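
/* Usage sketch (names assumed): "ip link set eth0 mtu 9000" ends up here.
 * Any accepted MTU change simply reuses bnx2_change_ring_size() with the
 * current ring sizes, which rebuilds the rings so the rx buffers match the
 * new frame size.
 */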
6646
6647#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6648static void
6649poll_bnx2(struct net_device *dev)
6650{
Michael Chan972ec0d2006-01-23 16:12:43 -08006651 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006652
6653 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006654 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006655 enable_irq(bp->pdev->irq);
6656}
6657#endif
6658
Michael Chan253c8b72007-01-08 19:56:01 -08006659static void __devinit
6660bnx2_get_5709_media(struct bnx2 *bp)
6661{
6662 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6663 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6664 u32 strap;
6665
6666 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6667 return;
6668 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6669 bp->phy_flags |= PHY_SERDES_FLAG;
6670 return;
6671 }
6672
6673 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6674 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6675 else
6676 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6677
6678 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6679 switch (strap) {
6680 case 0x4:
6681 case 0x5:
6682 case 0x6:
6683 bp->phy_flags |= PHY_SERDES_FLAG;
6684 return;
6685 }
6686 } else {
6687 switch (strap) {
6688 case 0x1:
6689 case 0x2:
6690 case 0x4:
6691 bp->phy_flags |= PHY_SERDES_FLAG;
6692 return;
6693 }
6694 }
6695}
6696
Michael Chan883e5152007-05-03 13:25:11 -07006697static void __devinit
6698bnx2_get_pci_speed(struct bnx2 *bp)
6699{
6700 u32 reg;
6701
6702 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6703 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6704 u32 clkreg;
6705
6706 bp->flags |= PCIX_FLAG;
6707
6708 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6709
6710 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6711 switch (clkreg) {
6712 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6713 bp->bus_speed_mhz = 133;
6714 break;
6715
6716 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6717 bp->bus_speed_mhz = 100;
6718 break;
6719
6720 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6721 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6722 bp->bus_speed_mhz = 66;
6723 break;
6724
6725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6727 bp->bus_speed_mhz = 50;
6728 break;
6729
6730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6731 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6732 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6733 bp->bus_speed_mhz = 33;
6734 break;
6735 }
6736 }
6737 else {
6738 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6739 bp->bus_speed_mhz = 66;
6740 else
6741 bp->bus_speed_mhz = 33;
6742 }
6743
6744 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6745 bp->flags |= PCI_32BIT_FLAG;
6746
6747}
6748
Michael Chanb6016b72005-05-26 13:03:09 -07006749static int __devinit
6750bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6751{
6752 struct bnx2 *bp;
6753 unsigned long mem_len;
Michael Chan58fc2ea2007-07-07 22:52:02 -07006754 int rc, i, j;
Michael Chanb6016b72005-05-26 13:03:09 -07006755 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006756 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006757
Michael Chanb6016b72005-05-26 13:03:09 -07006758 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006759 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006760
6761 bp->flags = 0;
6762 bp->phy_flags = 0;
6763
6764 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6765 rc = pci_enable_device(pdev);
6766 if (rc) {
Joe Perches898eb712007-10-18 03:06:30 -07006767 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006768 goto err_out;
6769 }
6770
6771 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006772 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006773 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006774 rc = -ENODEV;
6775 goto err_out_disable;
6776 }
6777
6778 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6779 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006780 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006781 goto err_out_disable;
6782 }
6783
6784 pci_set_master(pdev);
6785
6786 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6787 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006788 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006789 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006790 rc = -EIO;
6791 goto err_out_release;
6792 }
6793
Michael Chanb6016b72005-05-26 13:03:09 -07006794 bp->dev = dev;
6795 bp->pdev = pdev;
6796
6797 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006798 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006799 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006800
6801 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006802 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006803 dev->mem_end = dev->mem_start + mem_len;
6804 dev->irq = pdev->irq;
6805
6806 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6807
6808 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006809 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006810 rc = -ENOMEM;
6811 goto err_out_release;
6812 }
6813
6814 /* Configure byte swap and enable write to the reg_window registers.
6815 * Rely on the CPU to do target byte swapping on big endian systems;
6816 * the chip's target access swapping will not swap all accesses.
6817 */
6818 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6819 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6820 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6821
Pavel Machek829ca9a2005-09-03 15:56:56 -07006822 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006823
6824 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6825
Michael Chan883e5152007-05-03 13:25:11 -07006826 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6827 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6828 dev_err(&pdev->dev,
6829 "Cannot find PCIE capability, aborting.\n");
6830 rc = -EIO;
6831 goto err_out_unmap;
6832 }
6833 bp->flags |= PCIE_FLAG;
6834 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006835 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6836 if (bp->pcix_cap == 0) {
6837 dev_err(&pdev->dev,
6838 "Cannot find PCIX capability, aborting.\n");
6839 rc = -EIO;
6840 goto err_out_unmap;
6841 }
6842 }
6843
Michael Chan8e6a72c2007-05-03 13:24:48 -07006844 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6845 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6846 bp->flags |= MSI_CAP_FLAG;
6847 }
6848
Michael Chan40453c82007-05-03 13:19:18 -07006849 /* 5708 cannot support DMA addresses > 40-bit. */
6850 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6851 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6852 else
6853 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6854
6855 /* Configure DMA attributes. */
6856 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6857 dev->features |= NETIF_F_HIGHDMA;
6858 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6859 if (rc) {
6860 dev_err(&pdev->dev,
6861 "pci_set_consistent_dma_mask failed, aborting.\n");
6862 goto err_out_unmap;
6863 }
6864 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6865 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6866 goto err_out_unmap;
6867 }
6868
Michael Chan883e5152007-05-03 13:25:11 -07006869 if (!(bp->flags & PCIE_FLAG))
6870 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006871
6872 /* 5706A0 may falsely detect SERR and PERR. */
6873 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6874 reg = REG_RD(bp, PCI_COMMAND);
6875 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6876 REG_WR(bp, PCI_COMMAND, reg);
6877 }
6878 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6879 !(bp->flags & PCIX_FLAG)) {
6880
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006881 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006882 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006883 goto err_out_unmap;
6884 }
6885
6886 bnx2_init_nvram(bp);
6887
Michael Chane3648b32005-11-04 08:51:21 -08006888 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6889
6890 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006891 BNX2_SHM_HDR_SIGNATURE_SIG) {
6892 u32 off = PCI_FUNC(pdev->devfn) << 2;
6893
6894 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6895 } else
Michael Chane3648b32005-11-04 08:51:21 -08006896 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6897
Michael Chanb6016b72005-05-26 13:03:09 -07006898 /* Get the permanent MAC address. First we need to make sure the
6899 * firmware is actually running.
6900 */
Michael Chane3648b32005-11-04 08:51:21 -08006901 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006902
6903 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6904 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006905 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006906 rc = -ENODEV;
6907 goto err_out_unmap;
6908 }
6909
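	/* Decode the bootcode revision word into a dotted "x.y.z" string:
	 * each of its top three bytes is printed in decimal with leading
	 * zeros suppressed.
	 */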
Michael Chan58fc2ea2007-07-07 22:52:02 -07006910 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6911 for (i = 0, j = 0; i < 3; i++) {
6912 u8 num, k, skip0;
6913
6914 num = (u8) (reg >> (24 - (i * 8)));
6915 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6916 if (num >= k || !skip0 || k == 1) {
6917 bp->fw_version[j++] = (num / k) + '0';
6918 skip0 = 0;
6919 }
6920 }
6921 if (i != 2)
6922 bp->fw_version[j++] = '.';
6923 }
Michael Chan846f5c62007-10-10 16:16:51 -07006924 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6925 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6926 bp->wol = 1;
6927
6928 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
Michael Chanc2d3db82007-07-16 18:26:43 -07006929 bp->flags |= ASF_ENABLE_FLAG;
6930
6931 for (i = 0; i < 30; i++) {
6932 reg = REG_RD_IND(bp, bp->shmem_base +
6933 BNX2_BC_STATE_CONDITION);
6934 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6935 break;
6936 msleep(10);
6937 }
6938 }
Michael Chan58fc2ea2007-07-07 22:52:02 -07006939 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6940 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6941 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6942 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6943 int i;
6944 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6945
6946 bp->fw_version[j++] = ' ';
6947 for (i = 0; i < 3; i++) {
6948 reg = REG_RD_IND(bp, addr + i * 4);
6949 reg = swab32(reg);
6950 memcpy(&bp->fw_version[j], &reg, 4);
6951 j += 4;
6952 }
6953 }
Michael Chanb6016b72005-05-26 13:03:09 -07006954
Michael Chane3648b32005-11-04 08:51:21 -08006955 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006956 bp->mac_addr[0] = (u8) (reg >> 8);
6957 bp->mac_addr[1] = (u8) reg;
6958
Michael Chane3648b32005-11-04 08:51:21 -08006959 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006960 bp->mac_addr[2] = (u8) (reg >> 24);
6961 bp->mac_addr[3] = (u8) (reg >> 16);
6962 bp->mac_addr[4] = (u8) (reg >> 8);
6963 bp->mac_addr[5] = (u8) reg;
6964
Michael Chan5d5d0012007-12-12 11:17:43 -08006965 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6966
Michael Chanb6016b72005-05-26 13:03:09 -07006967 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006968 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006969
6970 bp->rx_csum = 1;
6971
Michael Chanb6016b72005-05-26 13:03:09 -07006972 bp->tx_quick_cons_trip_int = 20;
6973 bp->tx_quick_cons_trip = 20;
6974 bp->tx_ticks_int = 80;
6975 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006976
Michael Chanb6016b72005-05-26 13:03:09 -07006977 bp->rx_quick_cons_trip_int = 6;
6978 bp->rx_quick_cons_trip = 6;
6979 bp->rx_ticks_int = 18;
6980 bp->rx_ticks = 18;
6981
Michael Chan7ea69202007-07-16 18:27:10 -07006982 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
Michael Chanb6016b72005-05-26 13:03:09 -07006983
6984 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006985 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006986
Michael Chan5b0c76a2005-11-04 08:45:49 -08006987 bp->phy_addr = 1;
6988
Michael Chanb6016b72005-05-26 13:03:09 -07006989	/* Determine the PHY media type; WOL is disabled below for SERDES chips that cannot keep a link up on Vaux power. */
Michael Chan253c8b72007-01-08 19:56:01 -08006990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6991 bnx2_get_5709_media(bp);
6992 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006993 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006994
Michael Chan0d8a6572007-07-07 22:49:43 -07006995 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006996 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006997 bp->phy_port = PORT_FIBRE;
Michael Chan846f5c62007-10-10 16:16:51 -07006998 reg = REG_RD_IND(bp, bp->shmem_base +
6999 BNX2_SHARED_HW_CFG_CONFIG);
7000 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7001 bp->flags |= NO_WOL_FLAG;
7002 bp->wol = 0;
7003 }
Michael Chanbac0dff2006-11-19 14:15:05 -08007004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08007005 bp->phy_addr = 2;
Michael Chan5b0c76a2005-11-04 08:45:49 -08007006 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7007 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7008 }
Michael Chan0d8a6572007-07-07 22:49:43 -07007009 bnx2_init_remote_phy(bp);
7010
Michael Chan261dd5c2007-01-08 19:55:46 -08007011 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7012 CHIP_NUM(bp) == CHIP_NUM_5708)
7013 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanfb0c18b2007-12-10 17:18:23 -08007014 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7015 (CHIP_REV(bp) == CHIP_REV_Ax ||
7016 CHIP_REV(bp) == CHIP_REV_Bx))
Michael Chanb659f442007-02-02 00:46:35 -08007017 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07007018
Michael Chan16088272006-06-12 22:16:43 -07007019 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7020 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
Michael Chan846f5c62007-10-10 16:16:51 -07007021 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chandda1e392006-01-23 16:08:14 -08007022 bp->flags |= NO_WOL_FLAG;
Michael Chan846f5c62007-10-10 16:16:51 -07007023 bp->wol = 0;
7024 }
Michael Chandda1e392006-01-23 16:08:14 -08007025
Michael Chanb6016b72005-05-26 13:03:09 -07007026 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7027 bp->tx_quick_cons_trip_int =
7028 bp->tx_quick_cons_trip;
7029 bp->tx_ticks_int = bp->tx_ticks;
7030 bp->rx_quick_cons_trip_int =
7031 bp->rx_quick_cons_trip;
7032 bp->rx_ticks_int = bp->rx_ticks;
7033 bp->comp_prod_trip_int = bp->comp_prod_trip;
7034 bp->com_ticks_int = bp->com_ticks;
7035 bp->cmd_ticks_int = bp->cmd_ticks;
7036 }
7037
Michael Chanf9317a42006-09-29 17:06:23 -07007038 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7039 *
7040 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7041 * with byte enables disabled on the unused 32-bit word. This is legal
7042 * but causes problems on the AMD 8132 which will eventually stop
7043 * responding after a while.
7044 *
7045 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11007046 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07007047 */
7048 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7049 struct pci_dev *amd_8132 = NULL;
7050
7051 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7052 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7053 amd_8132))) {
Michael Chanf9317a42006-09-29 17:06:23 -07007054
Auke Kok44c10132007-06-08 15:46:36 -07007055 if (amd_8132->revision >= 0x10 &&
7056 amd_8132->revision <= 0x13) {
Michael Chanf9317a42006-09-29 17:06:23 -07007057 disable_msi = 1;
7058 pci_dev_put(amd_8132);
7059 break;
7060 }
7061 }
7062 }
7063
Michael Chandeaf3912007-07-07 22:48:00 -07007064 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07007065 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7066
Michael Chancd339a02005-08-25 15:35:24 -07007067 init_timer(&bp->timer);
7068 bp->timer.expires = RUN_AT(bp->timer_interval);
7069 bp->timer.data = (unsigned long) bp;
7070 bp->timer.function = bnx2_timer;
7071
Michael Chanb6016b72005-05-26 13:03:09 -07007072 return 0;
7073
7074err_out_unmap:
7075 if (bp->regview) {
7076 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07007077 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07007078 }
7079
7080err_out_release:
7081 pci_release_regions(pdev);
7082
7083err_out_disable:
7084 pci_disable_device(pdev);
7085 pci_set_drvdata(pdev, NULL);
7086
7087err_out:
7088 return rc;
7089}
7090
Michael Chan883e5152007-05-03 13:25:11 -07007091static char * __devinit
7092bnx2_bus_string(struct bnx2 *bp, char *str)
7093{
7094 char *s = str;
7095
7096 if (bp->flags & PCIE_FLAG) {
7097 s += sprintf(s, "PCI Express");
7098 } else {
7099 s += sprintf(s, "PCI");
7100 if (bp->flags & PCIX_FLAG)
7101 s += sprintf(s, "-X");
7102 if (bp->flags & PCI_32BIT_FLAG)
7103 s += sprintf(s, " 32-bit");
7104 else
7105 s += sprintf(s, " 64-bit");
7106 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7107 }
7108 return str;
7109}
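
/* Produces strings such as "PCI Express" or "PCI-X 64-bit 133MHz" for the
 * probe banner printed at the end of bnx2_init_one(); the caller passes a
 * 40-byte buffer, which is large enough for the longest form.
 */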
7110
Michael Chanb6016b72005-05-26 13:03:09 -07007111static void __devinit
Michael Chan35efa7c2007-12-20 19:56:37 -08007112bnx2_init_napi(struct bnx2 *bp)
7113{
7114 struct bnx2_napi *bnapi = &bp->bnx2_napi;
7115
7116 bnapi->bp = bp;
7117 netif_napi_add(bp->dev, &bnapi->napi, bnx2_poll, 64);
7118}
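
/* Registers bnx2_poll() with the NAPI framework using the customary weight
 * of 64 packets per poll pass.
 */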
7119
7120static int __devinit
Michael Chanb6016b72005-05-26 13:03:09 -07007121bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7122{
7123 static int version_printed = 0;
7124 struct net_device *dev = NULL;
7125 struct bnx2 *bp;
Joe Perches0795af52007-10-03 17:59:30 -07007126 int rc;
Michael Chan883e5152007-05-03 13:25:11 -07007127 char str[40];
Joe Perches0795af52007-10-03 17:59:30 -07007128 DECLARE_MAC_BUF(mac);
Michael Chanb6016b72005-05-26 13:03:09 -07007129
7130 if (version_printed++ == 0)
7131 printk(KERN_INFO "%s", version);
7132
7133 /* dev zeroed in init_etherdev */
7134 dev = alloc_etherdev(sizeof(*bp));
7135
7136 if (!dev)
7137 return -ENOMEM;
7138
7139 rc = bnx2_init_board(pdev, dev);
7140 if (rc < 0) {
7141 free_netdev(dev);
7142 return rc;
7143 }
7144
7145 dev->open = bnx2_open;
7146 dev->hard_start_xmit = bnx2_start_xmit;
7147 dev->stop = bnx2_close;
7148 dev->get_stats = bnx2_get_stats;
7149 dev->set_multicast_list = bnx2_set_rx_mode;
7150 dev->do_ioctl = bnx2_ioctl;
7151 dev->set_mac_address = bnx2_change_mac_addr;
7152 dev->change_mtu = bnx2_change_mtu;
7153 dev->tx_timeout = bnx2_tx_timeout;
7154 dev->watchdog_timeo = TX_TIMEOUT;
7155#ifdef BCM_VLAN
7156 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07007157#endif
Michael Chanb6016b72005-05-26 13:03:09 -07007158 dev->ethtool_ops = &bnx2_ethtool_ops;
Michael Chanb6016b72005-05-26 13:03:09 -07007159
Michael Chan972ec0d2006-01-23 16:12:43 -08007160 bp = netdev_priv(dev);
Michael Chan35efa7c2007-12-20 19:56:37 -08007161 bnx2_init_napi(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07007162
7163#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7164 dev->poll_controller = poll_bnx2;
7165#endif
7166
Michael Chan1b2f9222007-05-03 13:20:19 -07007167 pci_set_drvdata(pdev, dev);
7168
7169 memcpy(dev->dev_addr, bp->mac_addr, 6);
7170 memcpy(dev->perm_addr, bp->mac_addr, 6);
7171 bp->name = board_info[ent->driver_data].name;
7172
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007173 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07007174 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07007175 dev->features |= NETIF_F_IPV6_CSUM;
7176
Michael Chan1b2f9222007-05-03 13:20:19 -07007177#ifdef BCM_VLAN
7178 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7179#endif
7180 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07007181 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7182 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07007183
Michael Chanb6016b72005-05-26 13:03:09 -07007184 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04007185 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07007186 if (bp->regview)
7187 iounmap(bp->regview);
7188 pci_release_regions(pdev);
7189 pci_disable_device(pdev);
7190 pci_set_drvdata(pdev, NULL);
7191 free_netdev(dev);
7192 return rc;
7193 }
7194
Michael Chan883e5152007-05-03 13:25:11 -07007195 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Joe Perches0795af52007-10-03 17:59:30 -07007196 "IRQ %d, node addr %s\n",
Michael Chanb6016b72005-05-26 13:03:09 -07007197 dev->name,
7198 bp->name,
7199 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7200 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07007201 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07007202 dev->base_addr,
Joe Perches0795af52007-10-03 17:59:30 -07007203 bp->pdev->irq, print_mac(mac, dev->dev_addr));
Michael Chanb6016b72005-05-26 13:03:09 -07007204
Michael Chanb6016b72005-05-26 13:03:09 -07007205 return 0;
7206}
7207
7208static void __devexit
7209bnx2_remove_one(struct pci_dev *pdev)
7210{
7211 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007212 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007213
Michael Chanafdc08b2005-08-25 15:34:29 -07007214 flush_scheduled_work();
7215
Michael Chanb6016b72005-05-26 13:03:09 -07007216 unregister_netdev(dev);
7217
7218 if (bp->regview)
7219 iounmap(bp->regview);
7220
7221 free_netdev(dev);
7222 pci_release_regions(pdev);
7223 pci_disable_device(pdev);
7224 pci_set_drvdata(pdev, NULL);
7225}
7226
7227static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07007228bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07007229{
7230 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007231 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007232 u32 reset_code;
7233
Michael Chan6caebb02007-08-03 20:57:25 -07007234 /* PCI register 4 needs to be saved whether netif_running() or not.
7235 * MSI address and data need to be saved if using MSI and
7236 * netif_running().
7237 */
7238 pci_save_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007239 if (!netif_running(dev))
7240 return 0;
7241
Michael Chan1d60290f2006-03-20 17:50:08 -08007242 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07007243 bnx2_netif_stop(bp);
7244 netif_device_detach(dev);
7245 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08007246 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07007247 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08007248 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07007249 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7250 else
7251 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7252 bnx2_reset_chip(bp, reset_code);
7253 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07007254 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07007255 return 0;
7256}
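
/* The reset code passed to bnx2_reset_chip() tells the bootcode why the
 * driver is going down (link down, suspend with WoL, or suspend without
 * WoL), which presumably lets the firmware leave the MAC in the right state
 * for wake-up before the device enters a low-power state.
 */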
7257
7258static int
7259bnx2_resume(struct pci_dev *pdev)
7260{
7261 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08007262 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07007263
Michael Chan6caebb02007-08-03 20:57:25 -07007264 pci_restore_state(pdev);
Michael Chanb6016b72005-05-26 13:03:09 -07007265 if (!netif_running(dev))
7266 return 0;
7267
Pavel Machek829ca9a2005-09-03 15:56:56 -07007268 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07007269 netif_device_attach(dev);
7270 bnx2_init_nic(bp);
7271 bnx2_netif_start(bp);
7272 return 0;
7273}
7274
7275static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07007276 .name = DRV_MODULE_NAME,
7277 .id_table = bnx2_pci_tbl,
7278 .probe = bnx2_init_one,
7279 .remove = __devexit_p(bnx2_remove_one),
7280 .suspend = bnx2_suspend,
7281 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07007282};
7283
7284static int __init bnx2_init(void)
7285{
Jeff Garzik29917622006-08-19 17:48:59 -04007286 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07007287}
7288
7289static void __exit bnx2_cleanup(void)
7290{
7291 pci_unregister_driver(&bnx2_pci_driver);
7292}
7293
7294module_init(bnx2_init);
7295module_exit(bnx2_cleanup);
7296
7297
7298