blob: 56bc41ee9a4703998360a204405a03c16ddf7c3d [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan72fbaeb2007-05-03 13:25:32 -07003 * Copyright (c) 2004-2007 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chanb91b9fd2007-06-04 21:24:42 -070057#define DRV_MODULE_VERSION "1.5.11"
58#define DRV_MODULE_RELDATE "June 4, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080084 BCM5708,
85 BCM5708S,
Michael Chanbac0dff2006-11-19 14:15:05 -080086 BCM5709,
Michael Chan27a005b2007-05-03 13:23:41 -070087 BCM5709S,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
90/* indexed by board_t, above */
Arjan van de Venf71e1302006-03-03 21:33:57 -050091static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070092 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080099 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanbac0dff2006-11-19 14:15:05 -0800101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
Michael Chan27a005b2007-05-03 13:23:41 -0700102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
Michael Chanb6016b72005-05-26 13:03:09 -0700103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanbac0dff2006-11-19 14:15:05 -0800120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
Michael Chan27a005b2007-05-03 13:23:41 -0700122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
Michael Chanb6016b72005-05-26 13:03:09 -0700124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
Michael Chane89bbf12005-08-25 15:36:58 -0700216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
Michael Chan2f8af122006-08-15 01:39:10 -0700218 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700219
Michael Chan2f8af122006-08-15 01:39:10 -0700220 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
Michael Chane89bbf12005-08-25 15:36:58 -0700231 return (bp->tx_ring_size - diff);
232}
233
Michael Chanb6016b72005-05-26 13:03:09 -0700234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
Michael Chan1b8227c2007-05-03 13:24:05 -0700237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
Michael Chan1b8227c2007-05-03 13:24:05 -0700241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
Michael Chanb6016b72005-05-26 13:03:09 -0700244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
Michael Chan1b8227c2007-05-03 13:24:05 -0700249 spin_lock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
Michael Chan1b8227c2007-05-03 13:24:05 -0700252 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700253}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
Michael Chan1b8227c2007-05-03 13:24:05 -0700259 spin_lock_bh(&bp->indirect_lock);
Michael Chan59b47d82006-11-19 14:10:45 -0800260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
Michael Chan1b8227c2007-05-03 13:24:05 -0700277 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700278}
279
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400357
Michael Chanb6016b72005-05-26 13:03:09 -0700358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
Michael Chanb6016b72005-05-26 13:03:09 -0700397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -0800398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chanb6016b72005-05-26 13:03:09 -0700402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
Michael Chanbf5295b2006-03-23 01:11:56 -0800404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -0700405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
Michael Chan13daffa2006-03-20 17:49:20 -0800441 int i;
442
Michael Chan59b47d82006-11-19 14:10:45 -0800443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
Michael Chanb6016b72005-05-26 13:03:09 -0700451 if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800452 pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800455 bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700472 }
Michael Chan13daffa2006-03-20 17:49:20 -0800473 vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400474 bp->rx_buf_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
Michael Chan0f31f992006-03-23 01:12:38 -0800480 int i, status_blk_size;
Michael Chan13daffa2006-03-20 17:49:20 -0800481
Michael Chan0f31f992006-03-23 01:12:38 -0800482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
Michael Chanb6016b72005-05-26 13:03:09 -0700484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
Michael Chanb6016b72005-05-26 13:03:09 -0700487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
Michael Chan13daffa2006-03-20 17:49:20 -0800494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
Michael Chanb6016b72005-05-26 13:03:09 -0700496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
Michael Chan13daffa2006-03-20 17:49:20 -0800499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
Michael Chanb6016b72005-05-26 13:03:09 -0700511
Michael Chan0f31f992006-03-23 01:12:38 -0800512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
Michael Chan0f31f992006-03-23 01:12:38 -0800522 memset(bp->status_blk, 0, bp->status_stats_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700523
Michael Chan0f31f992006-03-23 01:12:38 -0800524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700526
Michael Chan0f31f992006-03-23 01:12:38 -0800527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
Michael Chanb6016b72005-05-26 13:03:09 -0700528
Michael Chan59b47d82006-11-19 14:10:45 -0800529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
Michael Chanb6016b72005-05-26 13:03:09 -0700541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
547
548static void
Michael Chane3648b32005-11-04 08:51:21 -0800549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
553 if (bp->link_up) {
554 u32 bmsr;
555
556 switch (bp->line_speed) {
557 case SPEED_10:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
562 break;
563 case SPEED_100:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
568 break;
569 case SPEED_1000:
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
572 else
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
574 break;
575 case SPEED_2500:
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
578 else
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
580 break;
581 }
582
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
584
585 if (bp->autoneg) {
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
587
Michael Chanca58c3a2007-05-03 13:22:52 -0700588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chane3648b32005-11-04 08:51:21 -0800590
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
594 else
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
596 }
597 }
598 else
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
600
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602}
603
604static void
Michael Chanb6016b72005-05-26 13:03:09 -0700605bnx2_report_link(struct bnx2 *bp)
606{
607 if (bp->link_up) {
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611 printk("%d Mbps ", bp->line_speed);
612
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
615 else
616 printk("half duplex");
617
618 if (bp->flow_ctrl) {
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
623 }
624 else {
625 printk(", transmit ");
626 }
627 printk("flow control ON");
628 }
629 printk("\n");
630 }
631 else {
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634 }
Michael Chane3648b32005-11-04 08:51:21 -0800635
636 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700637}
638
639static void
640bnx2_resolve_flow_ctrl(struct bnx2 *bp)
641{
642 u32 local_adv, remote_adv;
643
644 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
647
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
650 }
651 return;
652 }
653
654 if (bp->duplex != DUPLEX_FULL) {
655 return;
656 }
657
Michael Chan5b0c76a2005-11-04 08:45:49 -0800658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
660 u32 val;
661
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
667 return;
668 }
669
Michael Chanca58c3a2007-05-03 13:22:52 -0700670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700672
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
676
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
685
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
688 }
689
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
695 }
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
698 }
699 }
700 else {
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
703 }
704 }
705 }
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
709
710 bp->flow_ctrl = FLOW_CTRL_TX;
711 }
712 }
713}
714
715static int
Michael Chan27a005b2007-05-03 13:23:41 -0700716bnx2_5709s_linkup(struct bnx2 *bp)
717{
718 u32 val, speed;
719
720 bp->link_up = 1;
721
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
729 return 0;
730 }
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732 switch (speed) {
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
735 break;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
738 break;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
742 break;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
745 break;
746 }
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
749 else
750 bp->duplex = DUPLEX_HALF;
751 return 0;
752}
753
754static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800755bnx2_5708s_linkup(struct bnx2 *bp)
756{
757 u32 val;
758
759 bp->link_up = 1;
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
764 break;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
767 break;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
770 break;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
773 break;
774 }
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
777 else
778 bp->duplex = DUPLEX_HALF;
779
780 return 0;
781}
782
783static int
784bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700785{
786 u32 bmcr, local_adv, remote_adv, common;
787
788 bp->link_up = 1;
789 bp->line_speed = SPEED_1000;
790
Michael Chanca58c3a2007-05-03 13:22:52 -0700791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
794 }
795 else {
796 bp->duplex = DUPLEX_HALF;
797 }
798
799 if (!(bmcr & BMCR_ANENABLE)) {
800 return 0;
801 }
802
Michael Chanca58c3a2007-05-03 13:22:52 -0700803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700805
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
811 }
812 else {
813 bp->duplex = DUPLEX_HALF;
814 }
815 }
816
817 return 0;
818}
819
820static int
821bnx2_copper_linkup(struct bnx2 *bp)
822{
823 u32 bmcr;
824
Michael Chanca58c3a2007-05-03 13:22:52 -0700825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
828
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
831
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
836 }
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
840 }
841 else {
Michael Chanca58c3a2007-05-03 13:22:52 -0700842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700844
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
857 }
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
861 }
862 else {
863 bp->line_speed = 0;
864 bp->link_up = 0;
865 }
866 }
867 }
868 else {
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
871 }
872 else {
873 bp->line_speed = SPEED_10;
874 }
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
877 }
878 else {
879 bp->duplex = DUPLEX_HALF;
880 }
881 }
882
883 return 0;
884}
885
886static int
887bnx2_set_mac_link(struct bnx2 *bp)
888{
889 u32 val;
890
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
895 }
896
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
899
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -0800902 BNX2_EMAC_MODE_25G_MODE);
Michael Chanb6016b72005-05-26 13:03:09 -0700903
904 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800905 switch (bp->line_speed) {
906 case SPEED_10:
Michael Chan59b47d82006-11-19 14:10:45 -0800907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800909 break;
910 }
911 /* fall through */
912 case SPEED_100:
913 val |= BNX2_EMAC_MODE_PORT_MII;
914 break;
915 case SPEED_2500:
Michael Chan59b47d82006-11-19 14:10:45 -0800916 val |= BNX2_EMAC_MODE_25G_MODE;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800917 /* fall through */
918 case SPEED_1000:
919 val |= BNX2_EMAC_MODE_PORT_GMII;
920 break;
921 }
Michael Chanb6016b72005-05-26 13:03:09 -0700922 }
923 else {
924 val |= BNX2_EMAC_MODE_PORT_GMII;
925 }
926
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
931
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
934
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
938
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
942
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
946
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
949
950 return 0;
951}
952
Michael Chan27a005b2007-05-03 13:23:41 -0700953static void
954bnx2_enable_bmsr1(struct bnx2 *bp)
955{
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
960}
961
962static void
963bnx2_disable_bmsr1(struct bnx2 *bp)
964{
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969}
970
Michael Chanb6016b72005-05-26 13:03:09 -0700971static int
Michael Chan605a9e22007-05-03 13:23:13 -0700972bnx2_test_and_enable_2g5(struct bnx2 *bp)
973{
974 u32 up1;
975 int ret = 1;
976
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978 return 0;
979
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
982
Michael Chan27a005b2007-05-03 13:23:41 -0700983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
Michael Chan605a9e22007-05-03 13:23:13 -0700986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
990 ret = 0;
991 }
992
Michael Chan27a005b2007-05-03 13:23:41 -0700993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
Michael Chan605a9e22007-05-03 13:23:13 -0700997 return ret;
998}
999
1000static int
1001bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002{
1003 u32 up1;
1004 int ret = 0;
1005
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007 return 0;
1008
Michael Chan27a005b2007-05-03 13:23:41 -07001009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
Michael Chan605a9e22007-05-03 13:23:13 -07001012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 1;
1017 }
1018
Michael Chan27a005b2007-05-03 13:23:41 -07001019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
Michael Chan605a9e22007-05-03 13:23:13 -07001023 return ret;
1024}
1025
1026static void
1027bnx2_enable_forced_2g5(struct bnx2 *bp)
1028{
1029 u32 bmcr;
1030
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032 return;
1033
Michael Chan27a005b2007-05-03 13:23:41 -07001034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035 u32 val;
1036
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051 }
1052
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1057 }
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059}
1060
1061static void
1062bnx2_disable_forced_2g5(struct bnx2 *bp)
1063{
1064 u32 bmcr;
1065
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067 return;
1068
Michael Chan27a005b2007-05-03 13:23:41 -07001069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070 u32 val;
1071
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085 }
1086
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090}
1091
1092static int
Michael Chanb6016b72005-05-26 13:03:09 -07001093bnx2_set_link(struct bnx2 *bp)
1094{
1095 u32 bmsr;
1096 u8 link_up;
1097
Michael Chan80be4432006-11-19 14:07:28 -08001098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -07001099 bp->link_up = 1;
1100 return 0;
1101 }
1102
1103 link_up = bp->link_up;
1104
Michael Chan27a005b2007-05-03 13:23:41 -07001105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001109
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1112 u32 val;
1113
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1117 else
1118 bmsr &= ~BMSR_LSTATUS;
1119 }
1120
1121 if (bmsr & BMSR_LSTATUS) {
1122 bp->link_up = 1;
1123
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001131 }
1132 else {
1133 bnx2_copper_linkup(bp);
1134 }
1135 bnx2_resolve_flow_ctrl(bp);
1136 }
1137 else {
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
Michael Chan605a9e22007-05-03 13:23:13 -07001139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001141
Michael Chanb6016b72005-05-26 13:03:09 -07001142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1143 bp->link_up = 0;
1144 }
1145
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1148 }
1149
1150 bnx2_set_mac_link(bp);
1151
1152 return 0;
1153}
1154
1155static int
1156bnx2_reset_phy(struct bnx2 *bp)
1157{
1158 int i;
1159 u32 reg;
1160
Michael Chanca58c3a2007-05-03 13:22:52 -07001161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -07001162
1163#define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165 udelay(10);
1166
Michael Chanca58c3a2007-05-03 13:22:52 -07001167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001168 if (!(reg & BMCR_RESET)) {
1169 udelay(20);
1170 break;
1171 }
1172 }
1173 if (i == PHY_RESET_MAX_WAIT) {
1174 return -EBUSY;
1175 }
1176 return 0;
1177}
1178
1179static u32
1180bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181{
1182 u32 adv = 0;
1183
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1189 }
1190 else {
1191 adv = ADVERTISE_PAUSE_CAP;
1192 }
1193 }
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1197 }
1198 else {
1199 adv = ADVERTISE_PAUSE_ASYM;
1200 }
1201 }
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208 }
1209 }
1210 return adv;
1211}
1212
1213static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp)
1215{
Michael Chan605a9e22007-05-03 13:23:13 -07001216 u32 adv, bmcr;
Michael Chanb6016b72005-05-26 13:03:09 -07001217 u32 new_adv = 0;
1218
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001221 int force_link_down = 0;
1222
Michael Chan605a9e22007-05-03 13:23:13 -07001223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1229 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001230 bnx2_read_phy(bp, bp->mii_adv, &adv);
Michael Chan80be4432006-11-19 14:07:28 -08001231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1232
Michael Chanca58c3a2007-05-03 13:22:52 -07001233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001234 new_bmcr = bmcr & ~BMCR_ANENABLE;
Michael Chan80be4432006-11-19 14:07:28 -08001235 new_bmcr |= BMCR_SPEED1000;
Michael Chan605a9e22007-05-03 13:23:13 -07001236
Michael Chan27a005b2007-05-03 13:23:41 -07001237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1243 }
1244
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1248 else
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
Michael Chan5b0c76a2005-11-04 08:45:49 -08001250 }
1251
Michael Chanb6016b72005-05-26 13:03:09 -07001252 if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001253 adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001254 new_bmcr |= BMCR_FULLDPLX;
1255 }
1256 else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001257 adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001258 new_bmcr &= ~BMCR_FULLDPLX;
1259 }
Michael Chan5b0c76a2005-11-04 08:45:49 -08001260 if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001261 /* Force a link down visible on the other side */
1262 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001263 bnx2_write_phy(bp, bp->mii_adv, adv &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
Michael Chanca58c3a2007-05-03 13:22:52 -07001266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
Michael Chanb6016b72005-05-26 13:03:09 -07001267 BMCR_ANRESTART | BMCR_ANENABLE);
1268
1269 bp->link_up = 0;
1270 netif_carrier_off(bp->dev);
Michael Chanca58c3a2007-05-03 13:22:52 -07001271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan80be4432006-11-19 14:07:28 -08001272 bnx2_report_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001273 }
Michael Chanca58c3a2007-05-03 13:22:52 -07001274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chan605a9e22007-05-03 13:23:13 -07001276 } else {
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001279 }
1280 return 0;
1281 }
1282
Michael Chan605a9e22007-05-03 13:23:13 -07001283 bnx2_test_and_enable_2g5(bp);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001284
Michael Chanb6016b72005-05-26 13:03:09 -07001285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1287
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1289
Michael Chanca58c3a2007-05-03 13:22:52 -07001290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001292
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1296 if (bp->link_up) {
Michael Chanca58c3a2007-05-03 13:22:52 -07001297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chan80be4432006-11-19 14:07:28 -08001298 spin_unlock_bh(&bp->phy_lock);
1299 msleep(20);
1300 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001301 }
1302
Michael Chanca58c3a2007-05-03 13:22:52 -07001303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001305 BMCR_ANENABLE);
Michael Chanf8dd0642006-11-19 14:08:29 -08001306 /* Speed up link-up time when the link partner
1307 * does not autonegotiate which is very common
1308 * in blade servers. Some blade servers use
1309 * IPMI for kerboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1313 */
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan605a9e22007-05-03 13:23:13 -07001317 } else {
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001320 }
1321
1322 return 0;
1323}
1324
1325#define ETHTOOL_ALL_FIBRE_SPEED \
Michael Chandeaf3912007-07-07 22:48:00 -07001326 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1327 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1328 (ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07001329
1330#define ETHTOOL_ALL_COPPER_SPEED \
1331 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1332 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1333 ADVERTISED_1000baseT_Full)
1334
1335#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1336 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001337
Michael Chanb6016b72005-05-26 13:03:09 -07001338#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1339
Michael Chandeaf3912007-07-07 22:48:00 -07001340static void
1341bnx2_set_default_link(struct bnx2 *bp)
1342{
1343 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1344 bp->req_line_speed = 0;
1345 if (bp->phy_flags & PHY_SERDES_FLAG) {
1346 u32 reg;
1347
1348 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1349
1350 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1351 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1352 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1353 bp->autoneg = 0;
1354 bp->req_line_speed = bp->line_speed = SPEED_1000;
1355 bp->req_duplex = DUPLEX_FULL;
1356 }
1357 } else
1358 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1359}
1360
Michael Chanb6016b72005-05-26 13:03:09 -07001361static int
1362bnx2_setup_copper_phy(struct bnx2 *bp)
1363{
1364 u32 bmcr;
1365 u32 new_bmcr;
1366
Michael Chanca58c3a2007-05-03 13:22:52 -07001367 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001368
1369 if (bp->autoneg & AUTONEG_SPEED) {
1370 u32 adv_reg, adv1000_reg;
1371 u32 new_adv_reg = 0;
1372 u32 new_adv1000_reg = 0;
1373
Michael Chanca58c3a2007-05-03 13:22:52 -07001374 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001375 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1376 ADVERTISE_PAUSE_ASYM);
1377
1378 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1379 adv1000_reg &= PHY_ALL_1000_SPEED;
1380
1381 if (bp->advertising & ADVERTISED_10baseT_Half)
1382 new_adv_reg |= ADVERTISE_10HALF;
1383 if (bp->advertising & ADVERTISED_10baseT_Full)
1384 new_adv_reg |= ADVERTISE_10FULL;
1385 if (bp->advertising & ADVERTISED_100baseT_Half)
1386 new_adv_reg |= ADVERTISE_100HALF;
1387 if (bp->advertising & ADVERTISED_100baseT_Full)
1388 new_adv_reg |= ADVERTISE_100FULL;
1389 if (bp->advertising & ADVERTISED_1000baseT_Full)
1390 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001391
Michael Chanb6016b72005-05-26 13:03:09 -07001392 new_adv_reg |= ADVERTISE_CSMA;
1393
1394 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1395
1396 if ((adv1000_reg != new_adv1000_reg) ||
1397 (adv_reg != new_adv_reg) ||
1398 ((bmcr & BMCR_ANENABLE) == 0)) {
1399
Michael Chanca58c3a2007-05-03 13:22:52 -07001400 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001401 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001402 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001403 BMCR_ANENABLE);
1404 }
1405 else if (bp->link_up) {
1406 /* Flow ctrl may have changed from auto to forced */
1407 /* or vice-versa. */
1408
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1411 }
1412 return 0;
1413 }
1414
1415 new_bmcr = 0;
1416 if (bp->req_line_speed == SPEED_100) {
1417 new_bmcr |= BMCR_SPEED100;
1418 }
1419 if (bp->req_duplex == DUPLEX_FULL) {
1420 new_bmcr |= BMCR_FULLDPLX;
1421 }
1422 if (new_bmcr != bmcr) {
1423 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001424
Michael Chanca58c3a2007-05-03 13:22:52 -07001425 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1426 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001427
Michael Chanb6016b72005-05-26 13:03:09 -07001428 if (bmsr & BMSR_LSTATUS) {
1429 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001430 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001431 spin_unlock_bh(&bp->phy_lock);
1432 msleep(50);
1433 spin_lock_bh(&bp->phy_lock);
1434
Michael Chanca58c3a2007-05-03 13:22:52 -07001435 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1436 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001437 }
1438
Michael Chanca58c3a2007-05-03 13:22:52 -07001439 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001440
1441 /* Normally, the new speed is setup after the link has
1442 * gone down and up again. In some cases, link will not go
1443 * down so we need to set up the new speed here.
1444 */
1445 if (bmsr & BMSR_LSTATUS) {
1446 bp->line_speed = bp->req_line_speed;
1447 bp->duplex = bp->req_duplex;
1448 bnx2_resolve_flow_ctrl(bp);
1449 bnx2_set_mac_link(bp);
1450 }
Michael Chan27a005b2007-05-03 13:23:41 -07001451 } else {
1452 bnx2_resolve_flow_ctrl(bp);
1453 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001454 }
1455 return 0;
1456}
1457
1458static int
1459bnx2_setup_phy(struct bnx2 *bp)
1460{
1461 if (bp->loopback == MAC_LOOPBACK)
1462 return 0;
1463
1464 if (bp->phy_flags & PHY_SERDES_FLAG) {
1465 return (bnx2_setup_serdes_phy(bp));
1466 }
1467 else {
1468 return (bnx2_setup_copper_phy(bp));
1469 }
1470}
1471
1472static int
Michael Chan27a005b2007-05-03 13:23:41 -07001473bnx2_init_5709s_phy(struct bnx2 *bp)
1474{
1475 u32 val;
1476
1477 bp->mii_bmcr = MII_BMCR + 0x10;
1478 bp->mii_bmsr = MII_BMSR + 0x10;
1479 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1480 bp->mii_adv = MII_ADVERTISE + 0x10;
1481 bp->mii_lpa = MII_LPA + 0x10;
1482 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1483
1484 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1485 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1486
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1488 bnx2_reset_phy(bp);
1489
1490 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1491
1492 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1493 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1494 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1495 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1496
1497 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1498 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1499 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1500 val |= BCM5708S_UP1_2G5;
1501 else
1502 val &= ~BCM5708S_UP1_2G5;
1503 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1504
1505 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1506 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1507 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1508 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1509
1510 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1511
1512 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1513 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1514 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1515
1516 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1517
1518 return 0;
1519}
1520
1521static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001522bnx2_init_5708s_phy(struct bnx2 *bp)
1523{
1524 u32 val;
1525
Michael Chan27a005b2007-05-03 13:23:41 -07001526 bnx2_reset_phy(bp);
1527
1528 bp->mii_up1 = BCM5708S_UP1;
1529
Michael Chan5b0c76a2005-11-04 08:45:49 -08001530 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1531 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1532 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1533
1534 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1535 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1536 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1537
1538 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1539 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1540 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1541
1542 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1543 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1544 val |= BCM5708S_UP1_2G5;
1545 bnx2_write_phy(bp, BCM5708S_UP1, val);
1546 }
1547
1548 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001549 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1550 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001551 /* increase tx signal amplitude */
1552 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1553 BCM5708S_BLK_ADDR_TX_MISC);
1554 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1555 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1556 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1557 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1558 }
1559
Michael Chane3648b32005-11-04 08:51:21 -08001560 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001561 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1562
1563 if (val) {
1564 u32 is_backplane;
1565
Michael Chane3648b32005-11-04 08:51:21 -08001566 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001567 BNX2_SHARED_HW_CFG_CONFIG);
1568 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1569 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1570 BCM5708S_BLK_ADDR_TX_MISC);
1571 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1572 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1573 BCM5708S_BLK_ADDR_DIG);
1574 }
1575 }
1576 return 0;
1577}
1578
1579static int
1580bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001581{
Michael Chan27a005b2007-05-03 13:23:41 -07001582 bnx2_reset_phy(bp);
1583
Michael Chanb6016b72005-05-26 13:03:09 -07001584 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1585
Michael Chan59b47d82006-11-19 14:10:45 -08001586 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1587 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001588
1589 if (bp->dev->mtu > 1500) {
1590 u32 val;
1591
1592 /* Set extended packet length bit */
1593 bnx2_write_phy(bp, 0x18, 0x7);
1594 bnx2_read_phy(bp, 0x18, &val);
1595 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1596
1597 bnx2_write_phy(bp, 0x1c, 0x6c00);
1598 bnx2_read_phy(bp, 0x1c, &val);
1599 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1600 }
1601 else {
1602 u32 val;
1603
1604 bnx2_write_phy(bp, 0x18, 0x7);
1605 bnx2_read_phy(bp, 0x18, &val);
1606 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1607
1608 bnx2_write_phy(bp, 0x1c, 0x6c00);
1609 bnx2_read_phy(bp, 0x1c, &val);
1610 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1611 }
1612
1613 return 0;
1614}
1615
1616static int
1617bnx2_init_copper_phy(struct bnx2 *bp)
1618{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001619 u32 val;
1620
Michael Chan27a005b2007-05-03 13:23:41 -07001621 bnx2_reset_phy(bp);
1622
Michael Chanb6016b72005-05-26 13:03:09 -07001623 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1624 bnx2_write_phy(bp, 0x18, 0x0c00);
1625 bnx2_write_phy(bp, 0x17, 0x000a);
1626 bnx2_write_phy(bp, 0x15, 0x310b);
1627 bnx2_write_phy(bp, 0x17, 0x201f);
1628 bnx2_write_phy(bp, 0x15, 0x9506);
1629 bnx2_write_phy(bp, 0x17, 0x401f);
1630 bnx2_write_phy(bp, 0x15, 0x14e2);
1631 bnx2_write_phy(bp, 0x18, 0x0400);
1632 }
1633
Michael Chanb659f442007-02-02 00:46:35 -08001634 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1635 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1636 MII_BNX2_DSP_EXPAND_REG | 0x8);
1637 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1638 val &= ~(1 << 8);
1639 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1640 }
1641
Michael Chanb6016b72005-05-26 13:03:09 -07001642 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001643 /* Set extended packet length bit */
1644 bnx2_write_phy(bp, 0x18, 0x7);
1645 bnx2_read_phy(bp, 0x18, &val);
1646 bnx2_write_phy(bp, 0x18, val | 0x4000);
1647
1648 bnx2_read_phy(bp, 0x10, &val);
1649 bnx2_write_phy(bp, 0x10, val | 0x1);
1650 }
1651 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001652 bnx2_write_phy(bp, 0x18, 0x7);
1653 bnx2_read_phy(bp, 0x18, &val);
1654 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1655
1656 bnx2_read_phy(bp, 0x10, &val);
1657 bnx2_write_phy(bp, 0x10, val & ~0x1);
1658 }
1659
Michael Chan5b0c76a2005-11-04 08:45:49 -08001660 /* ethernet@wirespeed */
1661 bnx2_write_phy(bp, 0x18, 0x7007);
1662 bnx2_read_phy(bp, 0x18, &val);
1663 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001664 return 0;
1665}
1666
1667
1668static int
1669bnx2_init_phy(struct bnx2 *bp)
1670{
1671 u32 val;
1672 int rc = 0;
1673
1674 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1675 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1676
Michael Chanca58c3a2007-05-03 13:22:52 -07001677 bp->mii_bmcr = MII_BMCR;
1678 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001679 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001680 bp->mii_adv = MII_ADVERTISE;
1681 bp->mii_lpa = MII_LPA;
1682
Michael Chanb6016b72005-05-26 13:03:09 -07001683 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1684
Michael Chanb6016b72005-05-26 13:03:09 -07001685 bnx2_read_phy(bp, MII_PHYSID1, &val);
1686 bp->phy_id = val << 16;
1687 bnx2_read_phy(bp, MII_PHYSID2, &val);
1688 bp->phy_id |= val & 0xffff;
1689
1690 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001691 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1692 rc = bnx2_init_5706s_phy(bp);
1693 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1694 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001695 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1696 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001697 }
1698 else {
1699 rc = bnx2_init_copper_phy(bp);
1700 }
1701
1702 bnx2_setup_phy(bp);
1703
1704 return rc;
1705}
1706
1707static int
1708bnx2_set_mac_loopback(struct bnx2 *bp)
1709{
1710 u32 mac_mode;
1711
1712 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1713 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1714 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1715 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1716 bp->link_up = 1;
1717 return 0;
1718}
1719
Michael Chanbc5a0692006-01-23 16:13:22 -08001720static int bnx2_test_link(struct bnx2 *);
1721
1722static int
1723bnx2_set_phy_loopback(struct bnx2 *bp)
1724{
1725 u32 mac_mode;
1726 int rc, i;
1727
1728 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001729 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001730 BMCR_SPEED1000);
1731 spin_unlock_bh(&bp->phy_lock);
1732 if (rc)
1733 return rc;
1734
1735 for (i = 0; i < 10; i++) {
1736 if (bnx2_test_link(bp) == 0)
1737 break;
Michael Chan80be4432006-11-19 14:07:28 -08001738 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001739 }
1740
1741 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1742 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1743 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001744 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001745
1746 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1747 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1748 bp->link_up = 1;
1749 return 0;
1750}
1751
Michael Chanb6016b72005-05-26 13:03:09 -07001752static int
Michael Chanb090ae22006-01-23 16:07:10 -08001753bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001754{
1755 int i;
1756 u32 val;
1757
Michael Chanb6016b72005-05-26 13:03:09 -07001758 bp->fw_wr_seq++;
1759 msg_data |= bp->fw_wr_seq;
1760
Michael Chane3648b32005-11-04 08:51:21 -08001761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001762
1763 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001764 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1765 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001766
Michael Chane3648b32005-11-04 08:51:21 -08001767 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001768
1769 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1770 break;
1771 }
Michael Chanb090ae22006-01-23 16:07:10 -08001772 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1773 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001774
1775 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001776 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1777 if (!silent)
1778 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1779 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001780
1781 msg_data &= ~BNX2_DRV_MSG_CODE;
1782 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1783
Michael Chane3648b32005-11-04 08:51:21 -08001784 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001785
Michael Chanb6016b72005-05-26 13:03:09 -07001786 return -EBUSY;
1787 }
1788
Michael Chanb090ae22006-01-23 16:07:10 -08001789 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1790 return -EIO;
1791
Michael Chanb6016b72005-05-26 13:03:09 -07001792 return 0;
1793}
1794
Michael Chan59b47d82006-11-19 14:10:45 -08001795static int
1796bnx2_init_5709_context(struct bnx2 *bp)
1797{
1798 int i, ret = 0;
1799 u32 val;
1800
1801 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1802 val |= (BCM_PAGE_BITS - 8) << 16;
1803 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07001804 for (i = 0; i < 10; i++) {
1805 val = REG_RD(bp, BNX2_CTX_COMMAND);
1806 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
1807 break;
1808 udelay(2);
1809 }
1810 if (val & BNX2_CTX_COMMAND_MEM_INIT)
1811 return -EBUSY;
1812
Michael Chan59b47d82006-11-19 14:10:45 -08001813 for (i = 0; i < bp->ctx_pages; i++) {
1814 int j;
1815
1816 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1817 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1818 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1819 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1820 (u64) bp->ctx_blk_mapping[i] >> 32);
1821 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1822 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1823 for (j = 0; j < 10; j++) {
1824
1825 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1826 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1827 break;
1828 udelay(5);
1829 }
1830 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1831 ret = -EBUSY;
1832 break;
1833 }
1834 }
1835 return ret;
1836}
1837
Michael Chanb6016b72005-05-26 13:03:09 -07001838static void
1839bnx2_init_context(struct bnx2 *bp)
1840{
1841 u32 vcid;
1842
1843 vcid = 96;
1844 while (vcid) {
1845 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07001846 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07001847
1848 vcid--;
1849
1850 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1851 u32 new_vcid;
1852
1853 vcid_addr = GET_PCID_ADDR(vcid);
1854 if (vcid & 0x8) {
1855 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1856 }
1857 else {
1858 new_vcid = vcid;
1859 }
1860 pcid_addr = GET_PCID_ADDR(new_vcid);
1861 }
1862 else {
1863 vcid_addr = GET_CID_ADDR(vcid);
1864 pcid_addr = vcid_addr;
1865 }
1866
Michael Chan7947b202007-06-04 21:17:10 -07001867 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
1868 vcid_addr += (i << PHY_CTX_SHIFT);
1869 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07001870
Michael Chan7947b202007-06-04 21:17:10 -07001871 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1872 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1873
1874 /* Zero out the context. */
1875 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
1876 CTX_WR(bp, 0x00, offset, 0);
1877
1878 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1879 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
Michael Chanb6016b72005-05-26 13:03:09 -07001880 }
Michael Chanb6016b72005-05-26 13:03:09 -07001881 }
1882}
1883
1884static int
1885bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1886{
1887 u16 *good_mbuf;
1888 u32 good_mbuf_cnt;
1889 u32 val;
1890
1891 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1892 if (good_mbuf == NULL) {
1893 printk(KERN_ERR PFX "Failed to allocate memory in "
1894 "bnx2_alloc_bad_rbuf\n");
1895 return -ENOMEM;
1896 }
1897
1898 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1899 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1900
1901 good_mbuf_cnt = 0;
1902
1903 /* Allocate a bunch of mbufs and save the good ones in an array. */
1904 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1905 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1906 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1907
1908 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1909
1910 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1911
1912 /* The addresses with Bit 9 set are bad memory blocks. */
1913 if (!(val & (1 << 9))) {
1914 good_mbuf[good_mbuf_cnt] = (u16) val;
1915 good_mbuf_cnt++;
1916 }
1917
1918 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1919 }
1920
1921 /* Free the good ones back to the mbuf pool thus discarding
1922 * all the bad ones. */
1923 while (good_mbuf_cnt) {
1924 good_mbuf_cnt--;
1925
1926 val = good_mbuf[good_mbuf_cnt];
1927 val = (val << 9) | val | 1;
1928
1929 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1930 }
1931 kfree(good_mbuf);
1932 return 0;
1933}
1934
1935static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001936bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001937{
1938 u32 val;
1939 u8 *mac_addr = bp->dev->dev_addr;
1940
1941 val = (mac_addr[0] << 8) | mac_addr[1];
1942
1943 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1944
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001945 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001946 (mac_addr[4] << 8) | mac_addr[5];
1947
1948 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1949}
1950
1951static inline int
1952bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1953{
1954 struct sk_buff *skb;
1955 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1956 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001957 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001958 unsigned long align;
1959
Michael Chan932f3772006-08-15 01:39:36 -07001960 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001961 if (skb == NULL) {
1962 return -ENOMEM;
1963 }
1964
Michael Chan59b47d82006-11-19 14:10:45 -08001965 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1966 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001967
Michael Chanb6016b72005-05-26 13:03:09 -07001968 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1969 PCI_DMA_FROMDEVICE);
1970
1971 rx_buf->skb = skb;
1972 pci_unmap_addr_set(rx_buf, mapping, mapping);
1973
1974 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1975 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1976
1977 bp->rx_prod_bseq += bp->rx_buf_use_size;
1978
1979 return 0;
1980}
1981
Michael Chanda3e4fb2007-05-03 13:24:23 -07001982static int
1983bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1984{
1985 struct status_block *sblk = bp->status_blk;
1986 u32 new_link_state, old_link_state;
1987 int is_set = 1;
1988
1989 new_link_state = sblk->status_attn_bits & event;
1990 old_link_state = sblk->status_attn_bits_ack & event;
1991 if (new_link_state != old_link_state) {
1992 if (new_link_state)
1993 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1994 else
1995 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1996 } else
1997 is_set = 0;
1998
1999 return is_set;
2000}
2001
Michael Chanb6016b72005-05-26 13:03:09 -07002002static void
2003bnx2_phy_int(struct bnx2 *bp)
2004{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002005 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2006 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002007 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002008 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002009 }
2010}
2011
2012static void
2013bnx2_tx_int(struct bnx2 *bp)
2014{
Michael Chanf4e418f2005-11-04 08:53:48 -08002015 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002016 u16 hw_cons, sw_cons, sw_ring_cons;
2017 int tx_free_bd = 0;
2018
Michael Chanf4e418f2005-11-04 08:53:48 -08002019 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002020 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2021 hw_cons++;
2022 }
2023 sw_cons = bp->tx_cons;
2024
2025 while (sw_cons != hw_cons) {
2026 struct sw_bd *tx_buf;
2027 struct sk_buff *skb;
2028 int i, last;
2029
2030 sw_ring_cons = TX_RING_IDX(sw_cons);
2031
2032 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2033 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002034
Michael Chanb6016b72005-05-26 13:03:09 -07002035 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002036 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002037 u16 last_idx, last_ring_idx;
2038
2039 last_idx = sw_cons +
2040 skb_shinfo(skb)->nr_frags + 1;
2041 last_ring_idx = sw_ring_cons +
2042 skb_shinfo(skb)->nr_frags + 1;
2043 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2044 last_idx++;
2045 }
2046 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2047 break;
2048 }
2049 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002050
Michael Chanb6016b72005-05-26 13:03:09 -07002051 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2052 skb_headlen(skb), PCI_DMA_TODEVICE);
2053
2054 tx_buf->skb = NULL;
2055 last = skb_shinfo(skb)->nr_frags;
2056
2057 for (i = 0; i < last; i++) {
2058 sw_cons = NEXT_TX_BD(sw_cons);
2059
2060 pci_unmap_page(bp->pdev,
2061 pci_unmap_addr(
2062 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2063 mapping),
2064 skb_shinfo(skb)->frags[i].size,
2065 PCI_DMA_TODEVICE);
2066 }
2067
2068 sw_cons = NEXT_TX_BD(sw_cons);
2069
2070 tx_free_bd += last + 1;
2071
Michael Chan745720e2006-06-29 12:37:41 -07002072 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002073
Michael Chanf4e418f2005-11-04 08:53:48 -08002074 hw_cons = bp->hw_tx_cons =
2075 sblk->status_tx_quick_consumer_index0;
2076
Michael Chanb6016b72005-05-26 13:03:09 -07002077 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2078 hw_cons++;
2079 }
2080 }
2081
Michael Chane89bbf12005-08-25 15:36:58 -07002082 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002083 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2084 * before checking for netif_queue_stopped(). Without the
2085 * memory barrier, there is a small possibility that bnx2_start_xmit()
2086 * will miss it and cause the queue to be stopped forever.
2087 */
2088 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002089
Michael Chan2f8af122006-08-15 01:39:10 -07002090 if (unlikely(netif_queue_stopped(bp->dev)) &&
2091 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2092 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002093 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002094 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002095 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002096 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002097 }
Michael Chanb6016b72005-05-26 13:03:09 -07002098}
2099
2100static inline void
2101bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2102 u16 cons, u16 prod)
2103{
Michael Chan236b6392006-03-20 17:49:02 -08002104 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2105 struct rx_bd *cons_bd, *prod_bd;
2106
2107 cons_rx_buf = &bp->rx_buf_ring[cons];
2108 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002109
2110 pci_dma_sync_single_for_device(bp->pdev,
2111 pci_unmap_addr(cons_rx_buf, mapping),
2112 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2113
Michael Chan236b6392006-03-20 17:49:02 -08002114 bp->rx_prod_bseq += bp->rx_buf_use_size;
2115
2116 prod_rx_buf->skb = skb;
2117
2118 if (cons == prod)
2119 return;
2120
Michael Chanb6016b72005-05-26 13:03:09 -07002121 pci_unmap_addr_set(prod_rx_buf, mapping,
2122 pci_unmap_addr(cons_rx_buf, mapping));
2123
Michael Chan3fdfcc22006-03-20 17:49:49 -08002124 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2125 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002126 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2127 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002128}
2129
2130static int
2131bnx2_rx_int(struct bnx2 *bp, int budget)
2132{
Michael Chanf4e418f2005-11-04 08:53:48 -08002133 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002134 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2135 struct l2_fhdr *rx_hdr;
2136 int rx_pkt = 0;
2137
Michael Chanf4e418f2005-11-04 08:53:48 -08002138 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002139 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2140 hw_cons++;
2141 }
2142 sw_cons = bp->rx_cons;
2143 sw_prod = bp->rx_prod;
2144
2145 /* Memory barrier necessary as speculative reads of the rx
2146 * buffer can be ahead of the index in the status block
2147 */
2148 rmb();
2149 while (sw_cons != hw_cons) {
2150 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002151 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002152 struct sw_bd *rx_buf;
2153 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002154 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002155
2156 sw_ring_cons = RX_RING_IDX(sw_cons);
2157 sw_ring_prod = RX_RING_IDX(sw_prod);
2158
2159 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2160 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002161
2162 rx_buf->skb = NULL;
2163
2164 dma_addr = pci_unmap_addr(rx_buf, mapping);
2165
2166 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002167 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2168
2169 rx_hdr = (struct l2_fhdr *) skb->data;
2170 len = rx_hdr->l2_fhdr_pkt_len - 4;
2171
Michael Chanade2bfe2006-01-23 16:09:51 -08002172 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002173 (L2_FHDR_ERRORS_BAD_CRC |
2174 L2_FHDR_ERRORS_PHY_DECODE |
2175 L2_FHDR_ERRORS_ALIGNMENT |
2176 L2_FHDR_ERRORS_TOO_SHORT |
2177 L2_FHDR_ERRORS_GIANT_FRAME)) {
2178
2179 goto reuse_rx;
2180 }
2181
2182 /* Since we don't have a jumbo ring, copy small packets
2183 * if mtu > 1500
2184 */
2185 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2186 struct sk_buff *new_skb;
2187
Michael Chan932f3772006-08-15 01:39:36 -07002188 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002189 if (new_skb == NULL)
2190 goto reuse_rx;
2191
2192 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002193 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2194 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002195 skb_reserve(new_skb, 2);
2196 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002197
2198 bnx2_reuse_rx_skb(bp, skb,
2199 sw_ring_cons, sw_ring_prod);
2200
2201 skb = new_skb;
2202 }
2203 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002204 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002205 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2206
2207 skb_reserve(skb, bp->rx_offset);
2208 skb_put(skb, len);
2209 }
2210 else {
2211reuse_rx:
2212 bnx2_reuse_rx_skb(bp, skb,
2213 sw_ring_cons, sw_ring_prod);
2214 goto next_rx;
2215 }
2216
2217 skb->protocol = eth_type_trans(skb, bp->dev);
2218
2219 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002220 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002221
Michael Chan745720e2006-06-29 12:37:41 -07002222 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002223 goto next_rx;
2224
2225 }
2226
Michael Chanb6016b72005-05-26 13:03:09 -07002227 skb->ip_summed = CHECKSUM_NONE;
2228 if (bp->rx_csum &&
2229 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2230 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2231
Michael Chanade2bfe2006-01-23 16:09:51 -08002232 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2233 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002234 skb->ip_summed = CHECKSUM_UNNECESSARY;
2235 }
2236
2237#ifdef BCM_VLAN
2238 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2239 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2240 rx_hdr->l2_fhdr_vlan_tag);
2241 }
2242 else
2243#endif
2244 netif_receive_skb(skb);
2245
2246 bp->dev->last_rx = jiffies;
2247 rx_pkt++;
2248
2249next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002250 sw_cons = NEXT_RX_BD(sw_cons);
2251 sw_prod = NEXT_RX_BD(sw_prod);
2252
2253 if ((rx_pkt == budget))
2254 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002255
2256 /* Refresh hw_cons to see if there is new work */
2257 if (sw_cons == hw_cons) {
2258 hw_cons = bp->hw_rx_cons =
2259 sblk->status_rx_quick_consumer_index0;
2260 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2261 hw_cons++;
2262 rmb();
2263 }
Michael Chanb6016b72005-05-26 13:03:09 -07002264 }
2265 bp->rx_cons = sw_cons;
2266 bp->rx_prod = sw_prod;
2267
2268 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2269
2270 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2271
2272 mmiowb();
2273
2274 return rx_pkt;
2275
2276}
2277
2278/* MSI ISR - The only difference between this and the INTx ISR
2279 * is that the MSI interrupt is always serviced.
2280 */
2281static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002282bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002283{
2284 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002285 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002286
Michael Chanc921e4c2005-09-08 13:15:32 -07002287 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002288 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2289 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2290 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2291
2292 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002293 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2294 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002295
Michael Chan73eef4c2005-08-25 15:39:15 -07002296 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002297
Michael Chan73eef4c2005-08-25 15:39:15 -07002298 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002299}
2300
2301static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002302bnx2_msi_1shot(int irq, void *dev_instance)
2303{
2304 struct net_device *dev = dev_instance;
2305 struct bnx2 *bp = netdev_priv(dev);
2306
2307 prefetch(bp->status_blk);
2308
2309 /* Return here if interrupt is disabled. */
2310 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2311 return IRQ_HANDLED;
2312
2313 netif_rx_schedule(dev);
2314
2315 return IRQ_HANDLED;
2316}
2317
2318static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002319bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002320{
2321 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002322 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002323
2324 /* When using INTx, it is possible for the interrupt to arrive
2325 * at the CPU before the status block posted prior to the
2326 * interrupt. Reading a register will flush the status block.
2327 * When using MSI, the MSI message will always complete after
2328 * the status block write.
2329 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002330 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002331 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2332 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002333 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002334
2335 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2336 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2337 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2338
2339 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002340 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2341 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002342
Michael Chan73eef4c2005-08-25 15:39:15 -07002343 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002344
Michael Chan73eef4c2005-08-25 15:39:15 -07002345 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002346}
2347
Michael Chanda3e4fb2007-05-03 13:24:23 -07002348#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2349
Michael Chanf4e418f2005-11-04 08:53:48 -08002350static inline int
2351bnx2_has_work(struct bnx2 *bp)
2352{
2353 struct status_block *sblk = bp->status_blk;
2354
2355 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2356 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2357 return 1;
2358
Michael Chanda3e4fb2007-05-03 13:24:23 -07002359 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2360 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002361 return 1;
2362
2363 return 0;
2364}
2365
Michael Chanb6016b72005-05-26 13:03:09 -07002366static int
2367bnx2_poll(struct net_device *dev, int *budget)
2368{
Michael Chan972ec0d2006-01-23 16:12:43 -08002369 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002370 struct status_block *sblk = bp->status_blk;
2371 u32 status_attn_bits = sblk->status_attn_bits;
2372 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002373
Michael Chanda3e4fb2007-05-03 13:24:23 -07002374 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2375 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002376
Michael Chanb6016b72005-05-26 13:03:09 -07002377 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002378
2379 /* This is needed to take care of transient status
2380 * during link changes.
2381 */
2382 REG_WR(bp, BNX2_HC_COMMAND,
2383 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2384 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002385 }
2386
Michael Chanf4e418f2005-11-04 08:53:48 -08002387 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002388 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002389
Michael Chanf4e418f2005-11-04 08:53:48 -08002390 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002391 int orig_budget = *budget;
2392 int work_done;
2393
2394 if (orig_budget > dev->quota)
2395 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002396
Michael Chanb6016b72005-05-26 13:03:09 -07002397 work_done = bnx2_rx_int(bp, orig_budget);
2398 *budget -= work_done;
2399 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002400 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002401
Michael Chanf4e418f2005-11-04 08:53:48 -08002402 bp->last_status_idx = bp->status_blk->status_idx;
2403 rmb();
2404
2405 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002406 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002407 if (likely(bp->flags & USING_MSI_FLAG)) {
2408 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2409 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2410 bp->last_status_idx);
2411 return 0;
2412 }
Michael Chanb6016b72005-05-26 13:03:09 -07002413 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002414 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2415 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2416 bp->last_status_idx);
2417
2418 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2419 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2420 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002421 return 0;
2422 }
2423
2424 return 1;
2425}
2426
Herbert Xu932ff272006-06-09 12:20:56 -07002427/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002428 * from set_multicast.
2429 */
2430static void
2431bnx2_set_rx_mode(struct net_device *dev)
2432{
Michael Chan972ec0d2006-01-23 16:12:43 -08002433 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002434 u32 rx_mode, sort_mode;
2435 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002436
Michael Chanc770a652005-08-25 15:38:39 -07002437 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002438
2439 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2440 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2441 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2442#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002443 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002444 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002445#else
Michael Chane29054f2006-01-23 16:06:06 -08002446 if (!(bp->flags & ASF_ENABLE_FLAG))
2447 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002448#endif
2449 if (dev->flags & IFF_PROMISC) {
2450 /* Promiscuous mode. */
2451 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002452 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2453 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002454 }
2455 else if (dev->flags & IFF_ALLMULTI) {
2456 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2457 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2458 0xffffffff);
2459 }
2460 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2461 }
2462 else {
2463 /* Accept one or more multicast(s). */
2464 struct dev_mc_list *mclist;
2465 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2466 u32 regidx;
2467 u32 bit;
2468 u32 crc;
2469
2470 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2471
2472 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2473 i++, mclist = mclist->next) {
2474
2475 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2476 bit = crc & 0xff;
2477 regidx = (bit & 0xe0) >> 5;
2478 bit &= 0x1f;
2479 mc_filter[regidx] |= (1 << bit);
2480 }
2481
2482 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2483 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2484 mc_filter[i]);
2485 }
2486
2487 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2488 }
2489
2490 if (rx_mode != bp->rx_mode) {
2491 bp->rx_mode = rx_mode;
2492 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2493 }
2494
2495 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2496 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2497 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2498
Michael Chanc770a652005-08-25 15:38:39 -07002499 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002500}
2501
Michael Chanfba9fe92006-06-12 22:21:25 -07002502#define FW_BUF_SIZE 0x8000
2503
2504static int
2505bnx2_gunzip_init(struct bnx2 *bp)
2506{
2507 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2508 goto gunzip_nomem1;
2509
2510 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2511 goto gunzip_nomem2;
2512
2513 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2514 if (bp->strm->workspace == NULL)
2515 goto gunzip_nomem3;
2516
2517 return 0;
2518
2519gunzip_nomem3:
2520 kfree(bp->strm);
2521 bp->strm = NULL;
2522
2523gunzip_nomem2:
2524 vfree(bp->gunzip_buf);
2525 bp->gunzip_buf = NULL;
2526
2527gunzip_nomem1:
2528 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2529 "uncompression.\n", bp->dev->name);
2530 return -ENOMEM;
2531}
2532
2533static void
2534bnx2_gunzip_end(struct bnx2 *bp)
2535{
2536 kfree(bp->strm->workspace);
2537
2538 kfree(bp->strm);
2539 bp->strm = NULL;
2540
2541 if (bp->gunzip_buf) {
2542 vfree(bp->gunzip_buf);
2543 bp->gunzip_buf = NULL;
2544 }
2545}
2546
2547static int
2548bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2549{
2550 int n, rc;
2551
2552 /* check gzip header */
2553 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2554 return -EINVAL;
2555
2556 n = 10;
2557
2558#define FNAME 0x8
2559 if (zbuf[3] & FNAME)
2560 while ((zbuf[n++] != 0) && (n < len));
2561
2562 bp->strm->next_in = zbuf + n;
2563 bp->strm->avail_in = len - n;
2564 bp->strm->next_out = bp->gunzip_buf;
2565 bp->strm->avail_out = FW_BUF_SIZE;
2566
2567 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2568 if (rc != Z_OK)
2569 return rc;
2570
2571 rc = zlib_inflate(bp->strm, Z_FINISH);
2572
2573 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2574 *outbuf = bp->gunzip_buf;
2575
2576 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2577 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2578 bp->dev->name, bp->strm->msg);
2579
2580 zlib_inflateEnd(bp->strm);
2581
2582 if (rc == Z_STREAM_END)
2583 return 0;
2584
2585 return rc;
2586}
2587
Michael Chanb6016b72005-05-26 13:03:09 -07002588static void
2589load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2590 u32 rv2p_proc)
2591{
2592 int i;
2593 u32 val;
2594
2595
2596 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002597 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002598 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002599 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002600 rv2p_code++;
2601
2602 if (rv2p_proc == RV2P_PROC1) {
2603 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2604 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2605 }
2606 else {
2607 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2608 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2609 }
2610 }
2611
2612 /* Reset the processor, un-stall is done later. */
2613 if (rv2p_proc == RV2P_PROC1) {
2614 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2615 }
2616 else {
2617 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2618 }
2619}
2620
Michael Chanaf3ee512006-11-19 14:09:25 -08002621static int
Michael Chanb6016b72005-05-26 13:03:09 -07002622load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2623{
2624 u32 offset;
2625 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002626 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002627
2628 /* Halt the CPU. */
2629 val = REG_RD_IND(bp, cpu_reg->mode);
2630 val |= cpu_reg->mode_value_halt;
2631 REG_WR_IND(bp, cpu_reg->mode, val);
2632 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2633
2634 /* Load the Text area. */
2635 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002636 if (fw->gz_text) {
2637 u32 text_len;
2638 void *text;
2639
2640 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2641 &text_len);
2642 if (rc)
2643 return rc;
2644
2645 fw->text = text;
2646 }
2647 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002648 int j;
2649
2650 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002651 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002652 }
2653 }
2654
2655 /* Load the Data area. */
2656 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2657 if (fw->data) {
2658 int j;
2659
2660 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2661 REG_WR_IND(bp, offset, fw->data[j]);
2662 }
2663 }
2664
2665 /* Load the SBSS area. */
2666 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2667 if (fw->sbss) {
2668 int j;
2669
2670 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2671 REG_WR_IND(bp, offset, fw->sbss[j]);
2672 }
2673 }
2674
2675 /* Load the BSS area. */
2676 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2677 if (fw->bss) {
2678 int j;
2679
2680 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2681 REG_WR_IND(bp, offset, fw->bss[j]);
2682 }
2683 }
2684
2685 /* Load the Read-Only area. */
2686 offset = cpu_reg->spad_base +
2687 (fw->rodata_addr - cpu_reg->mips_view_base);
2688 if (fw->rodata) {
2689 int j;
2690
2691 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2692 REG_WR_IND(bp, offset, fw->rodata[j]);
2693 }
2694 }
2695
2696 /* Clear the pre-fetch instruction. */
2697 REG_WR_IND(bp, cpu_reg->inst, 0);
2698 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2699
2700 /* Start the CPU. */
2701 val = REG_RD_IND(bp, cpu_reg->mode);
2702 val &= ~cpu_reg->mode_value_halt;
2703 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2704 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002705
2706 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002707}
2708
Michael Chanfba9fe92006-06-12 22:21:25 -07002709static int
Michael Chanb6016b72005-05-26 13:03:09 -07002710bnx2_init_cpus(struct bnx2 *bp)
2711{
2712 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002713 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002714 int rc = 0;
2715 void *text;
2716 u32 text_len;
2717
2718 if ((rc = bnx2_gunzip_init(bp)) != 0)
2719 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002720
2721 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002722 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2723 &text_len);
2724 if (rc)
2725 goto init_cpu_err;
2726
2727 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2728
2729 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2730 &text_len);
2731 if (rc)
2732 goto init_cpu_err;
2733
2734 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002735
2736 /* Initialize the RX Processor. */
2737 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2738 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2739 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2740 cpu_reg.state = BNX2_RXP_CPU_STATE;
2741 cpu_reg.state_value_clear = 0xffffff;
2742 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2743 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2744 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2745 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2746 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2747 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2748 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002749
Michael Chand43584c2006-11-19 14:14:35 -08002750 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2751 fw = &bnx2_rxp_fw_09;
2752 else
2753 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002754
Michael Chanaf3ee512006-11-19 14:09:25 -08002755 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002756 if (rc)
2757 goto init_cpu_err;
2758
Michael Chanb6016b72005-05-26 13:03:09 -07002759 /* Initialize the TX Processor. */
2760 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2761 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2762 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2763 cpu_reg.state = BNX2_TXP_CPU_STATE;
2764 cpu_reg.state_value_clear = 0xffffff;
2765 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2766 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2767 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2768 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2769 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2770 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2771 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002772
Michael Chand43584c2006-11-19 14:14:35 -08002773 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2774 fw = &bnx2_txp_fw_09;
2775 else
2776 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002777
Michael Chanaf3ee512006-11-19 14:09:25 -08002778 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002779 if (rc)
2780 goto init_cpu_err;
2781
Michael Chanb6016b72005-05-26 13:03:09 -07002782 /* Initialize the TX Patch-up Processor. */
2783 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2784 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2785 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2786 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2787 cpu_reg.state_value_clear = 0xffffff;
2788 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2789 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2790 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2791 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2792 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2793 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2794 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002795
Michael Chand43584c2006-11-19 14:14:35 -08002796 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2797 fw = &bnx2_tpat_fw_09;
2798 else
2799 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002800
Michael Chanaf3ee512006-11-19 14:09:25 -08002801 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002802 if (rc)
2803 goto init_cpu_err;
2804
Michael Chanb6016b72005-05-26 13:03:09 -07002805 /* Initialize the Completion Processor. */
2806 cpu_reg.mode = BNX2_COM_CPU_MODE;
2807 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2808 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2809 cpu_reg.state = BNX2_COM_CPU_STATE;
2810 cpu_reg.state_value_clear = 0xffffff;
2811 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2812 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2813 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2814 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2815 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2816 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2817 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002818
Michael Chand43584c2006-11-19 14:14:35 -08002819 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2820 fw = &bnx2_com_fw_09;
2821 else
2822 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002823
Michael Chanaf3ee512006-11-19 14:09:25 -08002824 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002825 if (rc)
2826 goto init_cpu_err;
2827
Michael Chand43584c2006-11-19 14:14:35 -08002828 /* Initialize the Command Processor. */
2829 cpu_reg.mode = BNX2_CP_CPU_MODE;
2830 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2831 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2832 cpu_reg.state = BNX2_CP_CPU_STATE;
2833 cpu_reg.state_value_clear = 0xffffff;
2834 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2835 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2836 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2837 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2838 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2839 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2840 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002841
Michael Chand43584c2006-11-19 14:14:35 -08002842 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2843 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002844
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002845 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002846 if (rc)
2847 goto init_cpu_err;
2848 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002849init_cpu_err:
2850 bnx2_gunzip_end(bp);
2851 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002852}
2853
2854static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07002855bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07002856{
2857 u16 pmcsr;
2858
2859 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2860
2861 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07002862 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07002863 u32 val;
2864
2865 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2866 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2867 PCI_PM_CTRL_PME_STATUS);
2868
2869 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2870 /* delay required during transition out of D3hot */
2871 msleep(20);
2872
2873 val = REG_RD(bp, BNX2_EMAC_MODE);
2874 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2875 val &= ~BNX2_EMAC_MODE_MPKT;
2876 REG_WR(bp, BNX2_EMAC_MODE, val);
2877
2878 val = REG_RD(bp, BNX2_RPM_CONFIG);
2879 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2880 REG_WR(bp, BNX2_RPM_CONFIG, val);
2881 break;
2882 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07002883 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07002884 int i;
2885 u32 val, wol_msg;
2886
2887 if (bp->wol) {
2888 u32 advertising;
2889 u8 autoneg;
2890
2891 autoneg = bp->autoneg;
2892 advertising = bp->advertising;
2893
2894 bp->autoneg = AUTONEG_SPEED;
2895 bp->advertising = ADVERTISED_10baseT_Half |
2896 ADVERTISED_10baseT_Full |
2897 ADVERTISED_100baseT_Half |
2898 ADVERTISED_100baseT_Full |
2899 ADVERTISED_Autoneg;
2900
2901 bnx2_setup_copper_phy(bp);
2902
2903 bp->autoneg = autoneg;
2904 bp->advertising = advertising;
2905
2906 bnx2_set_mac_addr(bp);
2907
2908 val = REG_RD(bp, BNX2_EMAC_MODE);
2909
2910 /* Enable port mode. */
2911 val &= ~BNX2_EMAC_MODE_PORT;
2912 val |= BNX2_EMAC_MODE_PORT_MII |
2913 BNX2_EMAC_MODE_MPKT_RCVD |
2914 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07002915 BNX2_EMAC_MODE_MPKT;
2916
2917 REG_WR(bp, BNX2_EMAC_MODE, val);
2918
2919 /* receive all multicast */
2920 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2921 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2922 0xffffffff);
2923 }
2924 REG_WR(bp, BNX2_EMAC_RX_MODE,
2925 BNX2_EMAC_RX_MODE_SORT_MODE);
2926
2927 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2928 BNX2_RPM_SORT_USER0_MC_EN;
2929 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2930 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2931 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2932 BNX2_RPM_SORT_USER0_ENA);
2933
2934 /* Need to enable EMAC and RPM for WOL. */
2935 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2936 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2937 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2938 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2939
2940 val = REG_RD(bp, BNX2_RPM_CONFIG);
2941 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2942 REG_WR(bp, BNX2_RPM_CONFIG, val);
2943
2944 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2945 }
2946 else {
2947 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2948 }
2949
Michael Chandda1e392006-01-23 16:08:14 -08002950 if (!(bp->flags & NO_WOL_FLAG))
2951 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07002952
2953 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2954 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2955 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2956
2957 if (bp->wol)
2958 pmcsr |= 3;
2959 }
2960 else {
2961 pmcsr |= 3;
2962 }
2963 if (bp->wol) {
2964 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2965 }
2966 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2967 pmcsr);
2968
2969 /* No more memory access after this point until
2970 * device is brought back to D0.
2971 */
2972 udelay(50);
2973 break;
2974 }
2975 default:
2976 return -EINVAL;
2977 }
2978 return 0;
2979}
2980
2981static int
2982bnx2_acquire_nvram_lock(struct bnx2 *bp)
2983{
2984 u32 val;
2985 int j;
2986
2987 /* Request access to the flash interface. */
2988 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2989 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2990 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2991 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2992 break;
2993
2994 udelay(5);
2995 }
2996
2997 if (j >= NVRAM_TIMEOUT_COUNT)
2998 return -EBUSY;
2999
3000 return 0;
3001}
3002
3003static int
3004bnx2_release_nvram_lock(struct bnx2 *bp)
3005{
3006 int j;
3007 u32 val;
3008
3009 /* Relinquish nvram interface. */
3010 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3011
3012 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3013 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3014 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3015 break;
3016
3017 udelay(5);
3018 }
3019
3020 if (j >= NVRAM_TIMEOUT_COUNT)
3021 return -EBUSY;
3022
3023 return 0;
3024}
3025
3026
3027static int
3028bnx2_enable_nvram_write(struct bnx2 *bp)
3029{
3030 u32 val;
3031
3032 val = REG_RD(bp, BNX2_MISC_CFG);
3033 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3034
3035 if (!bp->flash_info->buffered) {
3036 int j;
3037
3038 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3039 REG_WR(bp, BNX2_NVM_COMMAND,
3040 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3041
3042 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3043 udelay(5);
3044
3045 val = REG_RD(bp, BNX2_NVM_COMMAND);
3046 if (val & BNX2_NVM_COMMAND_DONE)
3047 break;
3048 }
3049
3050 if (j >= NVRAM_TIMEOUT_COUNT)
3051 return -EBUSY;
3052 }
3053 return 0;
3054}
3055
3056static void
3057bnx2_disable_nvram_write(struct bnx2 *bp)
3058{
3059 u32 val;
3060
3061 val = REG_RD(bp, BNX2_MISC_CFG);
3062 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3063}
3064
3065
3066static void
3067bnx2_enable_nvram_access(struct bnx2 *bp)
3068{
3069 u32 val;
3070
3071 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3072 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003073 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003074 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3075}
3076
3077static void
3078bnx2_disable_nvram_access(struct bnx2 *bp)
3079{
3080 u32 val;
3081
3082 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3083 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003084 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003085 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3086 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3087}
3088
3089static int
3090bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3091{
3092 u32 cmd;
3093 int j;
3094
3095 if (bp->flash_info->buffered)
3096 /* Buffered flash, no erase needed */
3097 return 0;
3098
3099 /* Build an erase command */
3100 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3101 BNX2_NVM_COMMAND_DOIT;
3102
3103 /* Need to clear DONE bit separately. */
3104 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3105
3106 /* Address of the NVRAM to read from. */
3107 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3108
3109 /* Issue an erase command. */
3110 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3111
3112 /* Wait for completion. */
3113 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3114 u32 val;
3115
3116 udelay(5);
3117
3118 val = REG_RD(bp, BNX2_NVM_COMMAND);
3119 if (val & BNX2_NVM_COMMAND_DONE)
3120 break;
3121 }
3122
3123 if (j >= NVRAM_TIMEOUT_COUNT)
3124 return -EBUSY;
3125
3126 return 0;
3127}
3128
3129static int
3130bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3131{
3132 u32 cmd;
3133 int j;
3134
3135 /* Build the command word. */
3136 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3137
3138 /* Calculate an offset of a buffered flash. */
3139 if (bp->flash_info->buffered) {
3140 offset = ((offset / bp->flash_info->page_size) <<
3141 bp->flash_info->page_bits) +
3142 (offset % bp->flash_info->page_size);
3143 }
3144
3145 /* Need to clear DONE bit separately. */
3146 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3147
3148 /* Address of the NVRAM to read from. */
3149 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3150
3151 /* Issue a read command. */
3152 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3153
3154 /* Wait for completion. */
3155 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3156 u32 val;
3157
3158 udelay(5);
3159
3160 val = REG_RD(bp, BNX2_NVM_COMMAND);
3161 if (val & BNX2_NVM_COMMAND_DONE) {
3162 val = REG_RD(bp, BNX2_NVM_READ);
3163
3164 val = be32_to_cpu(val);
3165 memcpy(ret_val, &val, 4);
3166 break;
3167 }
3168 }
3169 if (j >= NVRAM_TIMEOUT_COUNT)
3170 return -EBUSY;
3171
3172 return 0;
3173}
3174
3175
3176static int
3177bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3178{
3179 u32 cmd, val32;
3180 int j;
3181
3182 /* Build the command word. */
3183 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3184
3185 /* Calculate an offset of a buffered flash. */
3186 if (bp->flash_info->buffered) {
3187 offset = ((offset / bp->flash_info->page_size) <<
3188 bp->flash_info->page_bits) +
3189 (offset % bp->flash_info->page_size);
3190 }
3191
3192 /* Need to clear DONE bit separately. */
3193 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3194
3195 memcpy(&val32, val, 4);
3196 val32 = cpu_to_be32(val32);
3197
3198 /* Write the data. */
3199 REG_WR(bp, BNX2_NVM_WRITE, val32);
3200
3201 /* Address of the NVRAM to write to. */
3202 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3203
3204 /* Issue the write command. */
3205 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3206
3207 /* Wait for completion. */
3208 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3209 udelay(5);
3210
3211 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3212 break;
3213 }
3214 if (j >= NVRAM_TIMEOUT_COUNT)
3215 return -EBUSY;
3216
3217 return 0;
3218}
3219
3220static int
3221bnx2_init_nvram(struct bnx2 *bp)
3222{
3223 u32 val;
3224 int j, entry_count, rc;
3225 struct flash_spec *flash;
3226
3227 /* Determine the selected interface. */
3228 val = REG_RD(bp, BNX2_NVM_CFG1);
3229
3230 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3231
3232 rc = 0;
3233 if (val & 0x40000000) {
3234
3235 /* Flash interface has been reconfigured */
3236 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003237 j++, flash++) {
3238 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3239 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003240 bp->flash_info = flash;
3241 break;
3242 }
3243 }
3244 }
3245 else {
Michael Chan37137702005-11-04 08:49:17 -08003246 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003247 /* Not yet been reconfigured */
3248
Michael Chan37137702005-11-04 08:49:17 -08003249 if (val & (1 << 23))
3250 mask = FLASH_BACKUP_STRAP_MASK;
3251 else
3252 mask = FLASH_STRAP_MASK;
3253
Michael Chanb6016b72005-05-26 13:03:09 -07003254 for (j = 0, flash = &flash_table[0]; j < entry_count;
3255 j++, flash++) {
3256
Michael Chan37137702005-11-04 08:49:17 -08003257 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003258 bp->flash_info = flash;
3259
3260 /* Request access to the flash interface. */
3261 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3262 return rc;
3263
3264 /* Enable access to flash interface */
3265 bnx2_enable_nvram_access(bp);
3266
3267 /* Reconfigure the flash interface */
3268 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3269 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3270 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3271 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3272
3273 /* Disable access to flash interface */
3274 bnx2_disable_nvram_access(bp);
3275 bnx2_release_nvram_lock(bp);
3276
3277 break;
3278 }
3279 }
3280 } /* if (val & 0x40000000) */
3281
3282 if (j == entry_count) {
3283 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003284 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003285 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003286 }
3287
Michael Chan1122db72006-01-23 16:11:42 -08003288 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3289 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3290 if (val)
3291 bp->flash_size = val;
3292 else
3293 bp->flash_size = bp->flash_info->total_size;
3294
Michael Chanb6016b72005-05-26 13:03:09 -07003295 return rc;
3296}
3297
3298static int
3299bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3300 int buf_size)
3301{
3302 int rc = 0;
3303 u32 cmd_flags, offset32, len32, extra;
3304
3305 if (buf_size == 0)
3306 return 0;
3307
3308 /* Request access to the flash interface. */
3309 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3310 return rc;
3311
3312 /* Enable access to flash interface */
3313 bnx2_enable_nvram_access(bp);
3314
3315 len32 = buf_size;
3316 offset32 = offset;
3317 extra = 0;
3318
3319 cmd_flags = 0;
3320
3321 if (offset32 & 3) {
3322 u8 buf[4];
3323 u32 pre_len;
3324
3325 offset32 &= ~3;
3326 pre_len = 4 - (offset & 3);
3327
3328 if (pre_len >= len32) {
3329 pre_len = len32;
3330 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3331 BNX2_NVM_COMMAND_LAST;
3332 }
3333 else {
3334 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3335 }
3336
3337 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3338
3339 if (rc)
3340 return rc;
3341
3342 memcpy(ret_buf, buf + (offset & 3), pre_len);
3343
3344 offset32 += 4;
3345 ret_buf += pre_len;
3346 len32 -= pre_len;
3347 }
3348 if (len32 & 3) {
3349 extra = 4 - (len32 & 3);
3350 len32 = (len32 + 4) & ~3;
3351 }
3352
3353 if (len32 == 4) {
3354 u8 buf[4];
3355
3356 if (cmd_flags)
3357 cmd_flags = BNX2_NVM_COMMAND_LAST;
3358 else
3359 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3360 BNX2_NVM_COMMAND_LAST;
3361
3362 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3363
3364 memcpy(ret_buf, buf, 4 - extra);
3365 }
3366 else if (len32 > 0) {
3367 u8 buf[4];
3368
3369 /* Read the first word. */
3370 if (cmd_flags)
3371 cmd_flags = 0;
3372 else
3373 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3374
3375 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3376
3377 /* Advance to the next dword. */
3378 offset32 += 4;
3379 ret_buf += 4;
3380 len32 -= 4;
3381
3382 while (len32 > 4 && rc == 0) {
3383 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3384
3385 /* Advance to the next dword. */
3386 offset32 += 4;
3387 ret_buf += 4;
3388 len32 -= 4;
3389 }
3390
3391 if (rc)
3392 return rc;
3393
3394 cmd_flags = BNX2_NVM_COMMAND_LAST;
3395 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3396
3397 memcpy(ret_buf, buf, 4 - extra);
3398 }
3399
3400 /* Disable access to flash interface */
3401 bnx2_disable_nvram_access(bp);
3402
3403 bnx2_release_nvram_lock(bp);
3404
3405 return rc;
3406}
3407
3408static int
3409bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3410 int buf_size)
3411{
3412 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003413 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003414 int rc = 0;
3415 int align_start, align_end;
3416
3417 buf = data_buf;
3418 offset32 = offset;
3419 len32 = buf_size;
3420 align_start = align_end = 0;
3421
3422 if ((align_start = (offset32 & 3))) {
3423 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003424 len32 += align_start;
3425 if (len32 < 4)
3426 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003427 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3428 return rc;
3429 }
3430
3431 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003432 align_end = 4 - (len32 & 3);
3433 len32 += align_end;
3434 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3435 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003436 }
3437
3438 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003439 align_buf = kmalloc(len32, GFP_KERNEL);
3440 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003441 return -ENOMEM;
3442 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003443 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003444 }
3445 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003446 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003447 }
Michael Chane6be7632007-01-08 19:56:13 -08003448 memcpy(align_buf + align_start, data_buf, buf_size);
3449 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003450 }
3451
Michael Chanae181bc2006-05-22 16:39:20 -07003452 if (bp->flash_info->buffered == 0) {
3453 flash_buffer = kmalloc(264, GFP_KERNEL);
3454 if (flash_buffer == NULL) {
3455 rc = -ENOMEM;
3456 goto nvram_write_end;
3457 }
3458 }
3459
Michael Chanb6016b72005-05-26 13:03:09 -07003460 written = 0;
3461 while ((written < len32) && (rc == 0)) {
3462 u32 page_start, page_end, data_start, data_end;
3463 u32 addr, cmd_flags;
3464 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003465
3466 /* Find the page_start addr */
3467 page_start = offset32 + written;
3468 page_start -= (page_start % bp->flash_info->page_size);
3469 /* Find the page_end addr */
3470 page_end = page_start + bp->flash_info->page_size;
3471 /* Find the data_start addr */
3472 data_start = (written == 0) ? offset32 : page_start;
3473 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003474 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003475 (offset32 + len32) : page_end;
3476
3477 /* Request access to the flash interface. */
3478 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3479 goto nvram_write_end;
3480
3481 /* Enable access to flash interface */
3482 bnx2_enable_nvram_access(bp);
3483
3484 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3485 if (bp->flash_info->buffered == 0) {
3486 int j;
3487
3488 /* Read the whole page into the buffer
3489 * (non-buffer flash only) */
3490 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3491 if (j == (bp->flash_info->page_size - 4)) {
3492 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3493 }
3494 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003495 page_start + j,
3496 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003497 cmd_flags);
3498
3499 if (rc)
3500 goto nvram_write_end;
3501
3502 cmd_flags = 0;
3503 }
3504 }
3505
3506 /* Enable writes to flash interface (unlock write-protect) */
3507 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3508 goto nvram_write_end;
3509
Michael Chanb6016b72005-05-26 13:03:09 -07003510 /* Loop to write back the buffer data from page_start to
3511 * data_start */
3512 i = 0;
3513 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003514 /* Erase the page */
3515 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3516 goto nvram_write_end;
3517
3518 /* Re-enable the write again for the actual write */
3519 bnx2_enable_nvram_write(bp);
3520
Michael Chanb6016b72005-05-26 13:03:09 -07003521 for (addr = page_start; addr < data_start;
3522 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003523
Michael Chanb6016b72005-05-26 13:03:09 -07003524 rc = bnx2_nvram_write_dword(bp, addr,
3525 &flash_buffer[i], cmd_flags);
3526
3527 if (rc != 0)
3528 goto nvram_write_end;
3529
3530 cmd_flags = 0;
3531 }
3532 }
3533
3534 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003535 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003536 if ((addr == page_end - 4) ||
3537 ((bp->flash_info->buffered) &&
3538 (addr == data_end - 4))) {
3539
3540 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3541 }
3542 rc = bnx2_nvram_write_dword(bp, addr, buf,
3543 cmd_flags);
3544
3545 if (rc != 0)
3546 goto nvram_write_end;
3547
3548 cmd_flags = 0;
3549 buf += 4;
3550 }
3551
3552 /* Loop to write back the buffer data from data_end
3553 * to page_end */
3554 if (bp->flash_info->buffered == 0) {
3555 for (addr = data_end; addr < page_end;
3556 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003557
Michael Chanb6016b72005-05-26 13:03:09 -07003558 if (addr == page_end-4) {
3559 cmd_flags = BNX2_NVM_COMMAND_LAST;
3560 }
3561 rc = bnx2_nvram_write_dword(bp, addr,
3562 &flash_buffer[i], cmd_flags);
3563
3564 if (rc != 0)
3565 goto nvram_write_end;
3566
3567 cmd_flags = 0;
3568 }
3569 }
3570
3571 /* Disable writes to flash interface (lock write-protect) */
3572 bnx2_disable_nvram_write(bp);
3573
3574 /* Disable access to flash interface */
3575 bnx2_disable_nvram_access(bp);
3576 bnx2_release_nvram_lock(bp);
3577
3578 /* Increment written */
3579 written += data_end - data_start;
3580 }
3581
3582nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003583 kfree(flash_buffer);
3584 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003585 return rc;
3586}
3587
3588static int
3589bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3590{
3591 u32 val;
3592 int i, rc = 0;
3593
3594 /* Wait for the current PCI transaction to complete before
3595 * issuing a reset. */
3596 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3597 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3598 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3599 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3600 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3601 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3602 udelay(5);
3603
Michael Chanb090ae22006-01-23 16:07:10 -08003604 /* Wait for the firmware to tell us it is ok to issue a reset. */
3605 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3606
Michael Chanb6016b72005-05-26 13:03:09 -07003607 /* Deposit a driver reset signature so the firmware knows that
3608 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003609 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003610 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3611
Michael Chanb6016b72005-05-26 13:03:09 -07003612 /* Do a dummy read to force the chip to complete all current transaction
3613 * before we issue a reset. */
3614 val = REG_RD(bp, BNX2_MISC_ID);
3615
Michael Chan234754d2006-11-19 14:11:41 -08003616 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3617 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3618 REG_RD(bp, BNX2_MISC_COMMAND);
3619 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003620
Michael Chan234754d2006-11-19 14:11:41 -08003621 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3622 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003623
Michael Chan234754d2006-11-19 14:11:41 -08003624 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003625
Michael Chan234754d2006-11-19 14:11:41 -08003626 } else {
3627 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3628 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3629 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3630
3631 /* Chip reset. */
3632 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3633
3634 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3635 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3636 current->state = TASK_UNINTERRUPTIBLE;
3637 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003638 }
Michael Chanb6016b72005-05-26 13:03:09 -07003639
Michael Chan234754d2006-11-19 14:11:41 -08003640 /* Reset takes approximate 30 usec */
3641 for (i = 0; i < 10; i++) {
3642 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3643 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3644 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3645 break;
3646 udelay(10);
3647 }
3648
3649 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3650 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3651 printk(KERN_ERR PFX "Chip reset did not complete\n");
3652 return -EBUSY;
3653 }
Michael Chanb6016b72005-05-26 13:03:09 -07003654 }
3655
3656 /* Make sure byte swapping is properly configured. */
3657 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3658 if (val != 0x01020304) {
3659 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3660 return -ENODEV;
3661 }
3662
Michael Chanb6016b72005-05-26 13:03:09 -07003663 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003664 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3665 if (rc)
3666 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003667
3668 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3669 /* Adjust the voltage regular to two steps lower. The default
3670 * of this register is 0x0000000e. */
3671 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3672
3673 /* Remove bad rbuf memory from the free pool. */
3674 rc = bnx2_alloc_bad_rbuf(bp);
3675 }
3676
3677 return rc;
3678}
3679
3680static int
3681bnx2_init_chip(struct bnx2 *bp)
3682{
3683 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003684 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003685
3686 /* Make sure the interrupt is not active. */
3687 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3688
3689 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3690 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3691#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003692 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003693#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003694 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003695 DMA_READ_CHANS << 12 |
3696 DMA_WRITE_CHANS << 16;
3697
3698 val |= (0x2 << 20) | (1 << 11);
3699
Michael Chandda1e392006-01-23 16:08:14 -08003700 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003701 val |= (1 << 23);
3702
3703 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3704 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3705 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3706
3707 REG_WR(bp, BNX2_DMA_CONFIG, val);
3708
3709 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3710 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3711 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3712 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3713 }
3714
3715 if (bp->flags & PCIX_FLAG) {
3716 u16 val16;
3717
3718 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3719 &val16);
3720 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3721 val16 & ~PCI_X_CMD_ERO);
3722 }
3723
3724 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3725 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3726 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3727 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3728
3729 /* Initialize context mapping and zero out the quick contexts. The
3730 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07003731 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3732 rc = bnx2_init_5709_context(bp);
3733 if (rc)
3734 return rc;
3735 } else
Michael Chan59b47d82006-11-19 14:10:45 -08003736 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003737
Michael Chanfba9fe92006-06-12 22:21:25 -07003738 if ((rc = bnx2_init_cpus(bp)) != 0)
3739 return rc;
3740
Michael Chanb6016b72005-05-26 13:03:09 -07003741 bnx2_init_nvram(bp);
3742
3743 bnx2_set_mac_addr(bp);
3744
3745 val = REG_RD(bp, BNX2_MQ_CONFIG);
3746 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3747 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003748 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3749 val |= BNX2_MQ_CONFIG_HALT_DIS;
3750
Michael Chanb6016b72005-05-26 13:03:09 -07003751 REG_WR(bp, BNX2_MQ_CONFIG, val);
3752
3753 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3754 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3755 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3756
3757 val = (BCM_PAGE_BITS - 8) << 24;
3758 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3759
3760 /* Configure page size. */
3761 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3762 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3763 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3764 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3765
3766 val = bp->mac_addr[0] +
3767 (bp->mac_addr[1] << 8) +
3768 (bp->mac_addr[2] << 16) +
3769 bp->mac_addr[3] +
3770 (bp->mac_addr[4] << 8) +
3771 (bp->mac_addr[5] << 16);
3772 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3773
3774 /* Program the MTU. Also include 4 bytes for CRC32. */
3775 val = bp->dev->mtu + ETH_HLEN + 4;
3776 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3777 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3778 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3779
3780 bp->last_status_idx = 0;
3781 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3782
3783 /* Set up how to generate a link change interrupt. */
3784 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3785
3786 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3787 (u64) bp->status_blk_mapping & 0xffffffff);
3788 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3789
3790 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3791 (u64) bp->stats_blk_mapping & 0xffffffff);
3792 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3793 (u64) bp->stats_blk_mapping >> 32);
3794
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003795 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003796 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3797
3798 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3799 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3800
3801 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3802 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3803
3804 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3805
3806 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3807
3808 REG_WR(bp, BNX2_HC_COM_TICKS,
3809 (bp->com_ticks_int << 16) | bp->com_ticks);
3810
3811 REG_WR(bp, BNX2_HC_CMD_TICKS,
3812 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3813
Michael Chan02537b062007-06-04 21:24:07 -07003814 if (CHIP_NUM(bp) == CHIP_NUM_5708)
3815 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
3816 else
3817 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
Michael Chanb6016b72005-05-26 13:03:09 -07003818 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3819
3820 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07003821 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003822 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07003823 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3824 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003825 }
3826
Michael Chan8e6a72c2007-05-03 13:24:48 -07003827 if (bp->flags & ONE_SHOT_MSI_FLAG)
3828 val |= BNX2_HC_CONFIG_ONE_SHOT;
3829
3830 REG_WR(bp, BNX2_HC_CONFIG, val);
3831
Michael Chanb6016b72005-05-26 13:03:09 -07003832 /* Clear internal stats counters. */
3833 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3834
Michael Chanda3e4fb2007-05-03 13:24:23 -07003835 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07003836
Michael Chane29054f2006-01-23 16:06:06 -08003837 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3838 BNX2_PORT_FEATURE_ASF_ENABLED)
3839 bp->flags |= ASF_ENABLE_FLAG;
3840
Michael Chanb6016b72005-05-26 13:03:09 -07003841 /* Initialize the receive filter. */
3842 bnx2_set_rx_mode(bp->dev);
3843
Michael Chan0aa38df2007-06-04 21:23:06 -07003844 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3845 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
3846 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
3847 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
3848 }
Michael Chanb090ae22006-01-23 16:07:10 -08003849 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3850 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003851
3852 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3853 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3854
3855 udelay(20);
3856
Michael Chanbf5295b2006-03-23 01:11:56 -08003857 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3858
Michael Chanb090ae22006-01-23 16:07:10 -08003859 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003860}
3861
Michael Chan59b47d82006-11-19 14:10:45 -08003862static void
3863bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3864{
3865 u32 val, offset0, offset1, offset2, offset3;
3866
3867 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3868 offset0 = BNX2_L2CTX_TYPE_XI;
3869 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3870 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3871 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3872 } else {
3873 offset0 = BNX2_L2CTX_TYPE;
3874 offset1 = BNX2_L2CTX_CMD_TYPE;
3875 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3876 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3877 }
3878 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3879 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3880
3881 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3882 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3883
3884 val = (u64) bp->tx_desc_mapping >> 32;
3885 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3886
3887 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3888 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3889}
Michael Chanb6016b72005-05-26 13:03:09 -07003890
3891static void
3892bnx2_init_tx_ring(struct bnx2 *bp)
3893{
3894 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003895 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003896
Michael Chan2f8af122006-08-15 01:39:10 -07003897 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3898
Michael Chanb6016b72005-05-26 13:03:09 -07003899 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003900
Michael Chanb6016b72005-05-26 13:03:09 -07003901 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3902 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3903
3904 bp->tx_prod = 0;
3905 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003906 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003907 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003908
Michael Chan59b47d82006-11-19 14:10:45 -08003909 cid = TX_CID;
3910 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3911 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003912
Michael Chan59b47d82006-11-19 14:10:45 -08003913 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003914}
3915
3916static void
3917bnx2_init_rx_ring(struct bnx2 *bp)
3918{
3919 struct rx_bd *rxbd;
3920 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003921 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07003922 u32 val;
3923
3924 /* 8 for CRC and VLAN */
3925 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08003926 /* hw alignment */
3927 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07003928
3929 ring_prod = prod = bp->rx_prod = 0;
3930 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003931 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003932 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003933
Michael Chan13daffa2006-03-20 17:49:20 -08003934 for (i = 0; i < bp->rx_max_ring; i++) {
3935 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07003936
Michael Chan13daffa2006-03-20 17:49:20 -08003937 rxbd = &bp->rx_desc_ring[i][0];
3938 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3939 rxbd->rx_bd_len = bp->rx_buf_use_size;
3940 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3941 }
3942 if (i == (bp->rx_max_ring - 1))
3943 j = 0;
3944 else
3945 j = i + 1;
3946 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3947 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3948 0xffffffff;
3949 }
Michael Chanb6016b72005-05-26 13:03:09 -07003950
3951 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3952 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3953 val |= 0x02 << 8;
3954 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3955
Michael Chan13daffa2006-03-20 17:49:20 -08003956 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07003957 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3958
Michael Chan13daffa2006-03-20 17:49:20 -08003959 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07003960 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3961
Michael Chan236b6392006-03-20 17:49:02 -08003962 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003963 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3964 break;
3965 }
3966 prod = NEXT_RX_BD(prod);
3967 ring_prod = RX_RING_IDX(prod);
3968 }
3969 bp->rx_prod = prod;
3970
3971 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3972
3973 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3974}
3975
3976static void
Michael Chan13daffa2006-03-20 17:49:20 -08003977bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3978{
3979 u32 num_rings, max;
3980
3981 bp->rx_ring_size = size;
3982 num_rings = 1;
3983 while (size > MAX_RX_DESC_CNT) {
3984 size -= MAX_RX_DESC_CNT;
3985 num_rings++;
3986 }
3987 /* round to next power of 2 */
3988 max = MAX_RX_RINGS;
3989 while ((max & num_rings) == 0)
3990 max >>= 1;
3991
3992 if (num_rings != max)
3993 max <<= 1;
3994
3995 bp->rx_max_ring = max;
3996 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3997}
3998
3999static void
Michael Chanb6016b72005-05-26 13:03:09 -07004000bnx2_free_tx_skbs(struct bnx2 *bp)
4001{
4002 int i;
4003
4004 if (bp->tx_buf_ring == NULL)
4005 return;
4006
4007 for (i = 0; i < TX_DESC_CNT; ) {
4008 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4009 struct sk_buff *skb = tx_buf->skb;
4010 int j, last;
4011
4012 if (skb == NULL) {
4013 i++;
4014 continue;
4015 }
4016
4017 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4018 skb_headlen(skb), PCI_DMA_TODEVICE);
4019
4020 tx_buf->skb = NULL;
4021
4022 last = skb_shinfo(skb)->nr_frags;
4023 for (j = 0; j < last; j++) {
4024 tx_buf = &bp->tx_buf_ring[i + j + 1];
4025 pci_unmap_page(bp->pdev,
4026 pci_unmap_addr(tx_buf, mapping),
4027 skb_shinfo(skb)->frags[j].size,
4028 PCI_DMA_TODEVICE);
4029 }
Michael Chan745720e2006-06-29 12:37:41 -07004030 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004031 i += j + 1;
4032 }
4033
4034}
4035
4036static void
4037bnx2_free_rx_skbs(struct bnx2 *bp)
4038{
4039 int i;
4040
4041 if (bp->rx_buf_ring == NULL)
4042 return;
4043
Michael Chan13daffa2006-03-20 17:49:20 -08004044 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004045 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4046 struct sk_buff *skb = rx_buf->skb;
4047
Michael Chan05d0f1c2005-11-04 08:53:48 -08004048 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004049 continue;
4050
4051 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4052 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4053
4054 rx_buf->skb = NULL;
4055
Michael Chan745720e2006-06-29 12:37:41 -07004056 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004057 }
4058}
4059
4060static void
4061bnx2_free_skbs(struct bnx2 *bp)
4062{
4063 bnx2_free_tx_skbs(bp);
4064 bnx2_free_rx_skbs(bp);
4065}
4066
4067static int
4068bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4069{
4070 int rc;
4071
4072 rc = bnx2_reset_chip(bp, reset_code);
4073 bnx2_free_skbs(bp);
4074 if (rc)
4075 return rc;
4076
Michael Chanfba9fe92006-06-12 22:21:25 -07004077 if ((rc = bnx2_init_chip(bp)) != 0)
4078 return rc;
4079
Michael Chanb6016b72005-05-26 13:03:09 -07004080 bnx2_init_tx_ring(bp);
4081 bnx2_init_rx_ring(bp);
4082 return 0;
4083}
4084
4085static int
4086bnx2_init_nic(struct bnx2 *bp)
4087{
4088 int rc;
4089
4090 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4091 return rc;
4092
Michael Chan80be4432006-11-19 14:07:28 -08004093 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004094 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004095 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004096 bnx2_set_link(bp);
4097 return 0;
4098}
4099
4100static int
4101bnx2_test_registers(struct bnx2 *bp)
4102{
4103 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004104 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004105 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004106 u16 offset;
4107 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004108#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004109 u32 rw_mask;
4110 u32 ro_mask;
4111 } reg_tbl[] = {
4112 { 0x006c, 0, 0x00000000, 0x0000003f },
4113 { 0x0090, 0, 0xffffffff, 0x00000000 },
4114 { 0x0094, 0, 0x00000000, 0x00000000 },
4115
Michael Chan5bae30c2007-05-03 13:18:46 -07004116 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4117 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4118 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4119 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4120 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4121 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4122 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4123 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4124 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004125
Michael Chan5bae30c2007-05-03 13:18:46 -07004126 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4127 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4128 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4129 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4130 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4131 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004132
Michael Chan5bae30c2007-05-03 13:18:46 -07004133 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4134 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4135 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004136
4137 { 0x1000, 0, 0x00000000, 0x00000001 },
4138 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004139
4140 { 0x1408, 0, 0x01c00800, 0x00000000 },
4141 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4142 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004143 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004144 { 0x14b0, 0, 0x00000002, 0x00000001 },
4145 { 0x14b8, 0, 0x00000000, 0x00000000 },
4146 { 0x14c0, 0, 0x00000000, 0x00000009 },
4147 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4148 { 0x14cc, 0, 0x00000000, 0x00000001 },
4149 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004150
4151 { 0x1800, 0, 0x00000000, 0x00000001 },
4152 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004153
4154 { 0x2800, 0, 0x00000000, 0x00000001 },
4155 { 0x2804, 0, 0x00000000, 0x00003f01 },
4156 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4157 { 0x2810, 0, 0xffff0000, 0x00000000 },
4158 { 0x2814, 0, 0xffff0000, 0x00000000 },
4159 { 0x2818, 0, 0xffff0000, 0x00000000 },
4160 { 0x281c, 0, 0xffff0000, 0x00000000 },
4161 { 0x2834, 0, 0xffffffff, 0x00000000 },
4162 { 0x2840, 0, 0x00000000, 0xffffffff },
4163 { 0x2844, 0, 0x00000000, 0xffffffff },
4164 { 0x2848, 0, 0xffffffff, 0x00000000 },
4165 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4166
4167 { 0x2c00, 0, 0x00000000, 0x00000011 },
4168 { 0x2c04, 0, 0x00000000, 0x00030007 },
4169
Michael Chanb6016b72005-05-26 13:03:09 -07004170 { 0x3c00, 0, 0x00000000, 0x00000001 },
4171 { 0x3c04, 0, 0x00000000, 0x00070000 },
4172 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4173 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4174 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4175 { 0x3c14, 0, 0x00000000, 0xffffffff },
4176 { 0x3c18, 0, 0x00000000, 0xffffffff },
4177 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4178 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004179
4180 { 0x5004, 0, 0x00000000, 0x0000007f },
4181 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004182
Michael Chanb6016b72005-05-26 13:03:09 -07004183 { 0x5c00, 0, 0x00000000, 0x00000001 },
4184 { 0x5c04, 0, 0x00000000, 0x0003000f },
4185 { 0x5c08, 0, 0x00000003, 0x00000000 },
4186 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4187 { 0x5c10, 0, 0x00000000, 0xffffffff },
4188 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4189 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4190 { 0x5c88, 0, 0x00000000, 0x00077373 },
4191 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4192
4193 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4194 { 0x680c, 0, 0xffffffff, 0x00000000 },
4195 { 0x6810, 0, 0xffffffff, 0x00000000 },
4196 { 0x6814, 0, 0xffffffff, 0x00000000 },
4197 { 0x6818, 0, 0xffffffff, 0x00000000 },
4198 { 0x681c, 0, 0xffffffff, 0x00000000 },
4199 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4200 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4201 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4202 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4203 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4204 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4205 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4206 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4207 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4208 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4209 { 0x684c, 0, 0xffffffff, 0x00000000 },
4210 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4211 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4212 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4213 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4214 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4215 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4216
4217 { 0xffff, 0, 0x00000000, 0x00000000 },
4218 };
4219
4220 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004221 is_5709 = 0;
4222 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4223 is_5709 = 1;
4224
Michael Chanb6016b72005-05-26 13:03:09 -07004225 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4226 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004227 u16 flags = reg_tbl[i].flags;
4228
4229 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4230 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004231
4232 offset = (u32) reg_tbl[i].offset;
4233 rw_mask = reg_tbl[i].rw_mask;
4234 ro_mask = reg_tbl[i].ro_mask;
4235
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004236 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004237
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004238 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004239
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004240 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004241 if ((val & rw_mask) != 0) {
4242 goto reg_test_err;
4243 }
4244
4245 if ((val & ro_mask) != (save_val & ro_mask)) {
4246 goto reg_test_err;
4247 }
4248
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004249 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004250
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004251 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004252 if ((val & rw_mask) != rw_mask) {
4253 goto reg_test_err;
4254 }
4255
4256 if ((val & ro_mask) != (save_val & ro_mask)) {
4257 goto reg_test_err;
4258 }
4259
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004260 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004261 continue;
4262
4263reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004264 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004265 ret = -ENODEV;
4266 break;
4267 }
4268 return ret;
4269}
4270
4271static int
4272bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4273{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004274 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004275 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4276 int i;
4277
4278 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4279 u32 offset;
4280
4281 for (offset = 0; offset < size; offset += 4) {
4282
4283 REG_WR_IND(bp, start + offset, test_pattern[i]);
4284
4285 if (REG_RD_IND(bp, start + offset) !=
4286 test_pattern[i]) {
4287 return -ENODEV;
4288 }
4289 }
4290 }
4291 return 0;
4292}
4293
4294static int
4295bnx2_test_memory(struct bnx2 *bp)
4296{
4297 int ret = 0;
4298 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004299 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004300 u32 offset;
4301 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004302 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004303 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004304 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004305 { 0xe0000, 0x4000 },
4306 { 0x120000, 0x4000 },
4307 { 0x1a0000, 0x4000 },
4308 { 0x160000, 0x4000 },
4309 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004310 },
4311 mem_tbl_5709[] = {
4312 { 0x60000, 0x4000 },
4313 { 0xa0000, 0x3000 },
4314 { 0xe0000, 0x4000 },
4315 { 0x120000, 0x4000 },
4316 { 0x1a0000, 0x4000 },
4317 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004318 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004319 struct mem_entry *mem_tbl;
4320
4321 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4322 mem_tbl = mem_tbl_5709;
4323 else
4324 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004325
4326 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4327 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4328 mem_tbl[i].len)) != 0) {
4329 return ret;
4330 }
4331 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004332
Michael Chanb6016b72005-05-26 13:03:09 -07004333 return ret;
4334}
4335
Michael Chanbc5a0692006-01-23 16:13:22 -08004336#define BNX2_MAC_LOOPBACK 0
4337#define BNX2_PHY_LOOPBACK 1
4338
Michael Chanb6016b72005-05-26 13:03:09 -07004339static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004340bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004341{
4342 unsigned int pkt_size, num_pkts, i;
4343 struct sk_buff *skb, *rx_skb;
4344 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004345 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004346 dma_addr_t map;
4347 struct tx_bd *txbd;
4348 struct sw_bd *rx_buf;
4349 struct l2_fhdr *rx_hdr;
4350 int ret = -ENODEV;
4351
Michael Chanbc5a0692006-01-23 16:13:22 -08004352 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4353 bp->loopback = MAC_LOOPBACK;
4354 bnx2_set_mac_loopback(bp);
4355 }
4356 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004357 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004358 bnx2_set_phy_loopback(bp);
4359 }
4360 else
4361 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004362
4363 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004364 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004365 if (!skb)
4366 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004367 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004368 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004369 memset(packet + 6, 0x0, 8);
4370 for (i = 14; i < pkt_size; i++)
4371 packet[i] = (unsigned char) (i & 0xff);
4372
4373 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4374 PCI_DMA_TODEVICE);
4375
Michael Chanbf5295b2006-03-23 01:11:56 -08004376 REG_WR(bp, BNX2_HC_COMMAND,
4377 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4378
Michael Chanb6016b72005-05-26 13:03:09 -07004379 REG_RD(bp, BNX2_HC_COMMAND);
4380
4381 udelay(5);
4382 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4383
Michael Chanb6016b72005-05-26 13:03:09 -07004384 num_pkts = 0;
4385
Michael Chanbc5a0692006-01-23 16:13:22 -08004386 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004387
4388 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4389 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4390 txbd->tx_bd_mss_nbytes = pkt_size;
4391 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4392
4393 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004394 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4395 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004396
Michael Chan234754d2006-11-19 14:11:41 -08004397 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4398 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004399
4400 udelay(100);
4401
Michael Chanbf5295b2006-03-23 01:11:56 -08004402 REG_WR(bp, BNX2_HC_COMMAND,
4403 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4404
Michael Chanb6016b72005-05-26 13:03:09 -07004405 REG_RD(bp, BNX2_HC_COMMAND);
4406
4407 udelay(5);
4408
4409 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004410 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004411
Michael Chanbc5a0692006-01-23 16:13:22 -08004412 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004413 goto loopback_test_done;
4414 }
4415
4416 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4417 if (rx_idx != rx_start_idx + num_pkts) {
4418 goto loopback_test_done;
4419 }
4420
4421 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4422 rx_skb = rx_buf->skb;
4423
4424 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4425 skb_reserve(rx_skb, bp->rx_offset);
4426
4427 pci_dma_sync_single_for_cpu(bp->pdev,
4428 pci_unmap_addr(rx_buf, mapping),
4429 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4430
Michael Chanade2bfe2006-01-23 16:09:51 -08004431 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004432 (L2_FHDR_ERRORS_BAD_CRC |
4433 L2_FHDR_ERRORS_PHY_DECODE |
4434 L2_FHDR_ERRORS_ALIGNMENT |
4435 L2_FHDR_ERRORS_TOO_SHORT |
4436 L2_FHDR_ERRORS_GIANT_FRAME)) {
4437
4438 goto loopback_test_done;
4439 }
4440
4441 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4442 goto loopback_test_done;
4443 }
4444
4445 for (i = 14; i < pkt_size; i++) {
4446 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4447 goto loopback_test_done;
4448 }
4449 }
4450
4451 ret = 0;
4452
4453loopback_test_done:
4454 bp->loopback = 0;
4455 return ret;
4456}
4457
Michael Chanbc5a0692006-01-23 16:13:22 -08004458#define BNX2_MAC_LOOPBACK_FAILED 1
4459#define BNX2_PHY_LOOPBACK_FAILED 2
4460#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4461 BNX2_PHY_LOOPBACK_FAILED)
4462
4463static int
4464bnx2_test_loopback(struct bnx2 *bp)
4465{
4466 int rc = 0;
4467
4468 if (!netif_running(bp->dev))
4469 return BNX2_LOOPBACK_FAILED;
4470
4471 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4472 spin_lock_bh(&bp->phy_lock);
4473 bnx2_init_phy(bp);
4474 spin_unlock_bh(&bp->phy_lock);
4475 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4476 rc |= BNX2_MAC_LOOPBACK_FAILED;
4477 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4478 rc |= BNX2_PHY_LOOPBACK_FAILED;
4479 return rc;
4480}
4481
Michael Chanb6016b72005-05-26 13:03:09 -07004482#define NVRAM_SIZE 0x200
4483#define CRC32_RESIDUAL 0xdebb20e3
4484
4485static int
4486bnx2_test_nvram(struct bnx2 *bp)
4487{
4488 u32 buf[NVRAM_SIZE / 4];
4489 u8 *data = (u8 *) buf;
4490 int rc = 0;
4491 u32 magic, csum;
4492
4493 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4494 goto test_nvram_done;
4495
4496 magic = be32_to_cpu(buf[0]);
4497 if (magic != 0x669955aa) {
4498 rc = -ENODEV;
4499 goto test_nvram_done;
4500 }
4501
4502 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4503 goto test_nvram_done;
4504
4505 csum = ether_crc_le(0x100, data);
4506 if (csum != CRC32_RESIDUAL) {
4507 rc = -ENODEV;
4508 goto test_nvram_done;
4509 }
4510
4511 csum = ether_crc_le(0x100, data + 0x100);
4512 if (csum != CRC32_RESIDUAL) {
4513 rc = -ENODEV;
4514 }
4515
4516test_nvram_done:
4517 return rc;
4518}
4519
4520static int
4521bnx2_test_link(struct bnx2 *bp)
4522{
4523 u32 bmsr;
4524
Michael Chanc770a652005-08-25 15:38:39 -07004525 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004526 bnx2_enable_bmsr1(bp);
4527 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4528 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4529 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004530 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004531
Michael Chanb6016b72005-05-26 13:03:09 -07004532 if (bmsr & BMSR_LSTATUS) {
4533 return 0;
4534 }
4535 return -ENODEV;
4536}
4537
4538static int
4539bnx2_test_intr(struct bnx2 *bp)
4540{
4541 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004542 u16 status_idx;
4543
4544 if (!netif_running(bp->dev))
4545 return -ENODEV;
4546
4547 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4548
4549 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004550 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004551 REG_RD(bp, BNX2_HC_COMMAND);
4552
4553 for (i = 0; i < 10; i++) {
4554 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4555 status_idx) {
4556
4557 break;
4558 }
4559
4560 msleep_interruptible(10);
4561 }
4562 if (i < 10)
4563 return 0;
4564
4565 return -ENODEV;
4566}
4567
4568static void
Michael Chan48b01e22006-11-19 14:08:00 -08004569bnx2_5706_serdes_timer(struct bnx2 *bp)
4570{
4571 spin_lock(&bp->phy_lock);
4572 if (bp->serdes_an_pending)
4573 bp->serdes_an_pending--;
4574 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4575 u32 bmcr;
4576
4577 bp->current_interval = bp->timer_interval;
4578
Michael Chanca58c3a2007-05-03 13:22:52 -07004579 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004580
4581 if (bmcr & BMCR_ANENABLE) {
4582 u32 phy1, phy2;
4583
4584 bnx2_write_phy(bp, 0x1c, 0x7c00);
4585 bnx2_read_phy(bp, 0x1c, &phy1);
4586
4587 bnx2_write_phy(bp, 0x17, 0x0f01);
4588 bnx2_read_phy(bp, 0x15, &phy2);
4589 bnx2_write_phy(bp, 0x17, 0x0f01);
4590 bnx2_read_phy(bp, 0x15, &phy2);
4591
4592 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4593 !(phy2 & 0x20)) { /* no CONFIG */
4594
4595 bmcr &= ~BMCR_ANENABLE;
4596 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004597 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004598 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4599 }
4600 }
4601 }
4602 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4603 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4604 u32 phy2;
4605
4606 bnx2_write_phy(bp, 0x17, 0x0f01);
4607 bnx2_read_phy(bp, 0x15, &phy2);
4608 if (phy2 & 0x20) {
4609 u32 bmcr;
4610
Michael Chanca58c3a2007-05-03 13:22:52 -07004611 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004612 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004613 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004614
4615 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4616 }
4617 } else
4618 bp->current_interval = bp->timer_interval;
4619
4620 spin_unlock(&bp->phy_lock);
4621}
4622
4623static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004624bnx2_5708_serdes_timer(struct bnx2 *bp)
4625{
4626 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4627 bp->serdes_an_pending = 0;
4628 return;
4629 }
4630
4631 spin_lock(&bp->phy_lock);
4632 if (bp->serdes_an_pending)
4633 bp->serdes_an_pending--;
4634 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4635 u32 bmcr;
4636
Michael Chanca58c3a2007-05-03 13:22:52 -07004637 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004638 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004639 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004640 bp->current_interval = SERDES_FORCED_TIMEOUT;
4641 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004642 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004643 bp->serdes_an_pending = 2;
4644 bp->current_interval = bp->timer_interval;
4645 }
4646
4647 } else
4648 bp->current_interval = bp->timer_interval;
4649
4650 spin_unlock(&bp->phy_lock);
4651}
4652
4653static void
Michael Chanb6016b72005-05-26 13:03:09 -07004654bnx2_timer(unsigned long data)
4655{
4656 struct bnx2 *bp = (struct bnx2 *) data;
4657 u32 msg;
4658
Michael Chancd339a02005-08-25 15:35:24 -07004659 if (!netif_running(bp->dev))
4660 return;
4661
Michael Chanb6016b72005-05-26 13:03:09 -07004662 if (atomic_read(&bp->intr_sem) != 0)
4663 goto bnx2_restart_timer;
4664
4665 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004666 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004667
Michael Chancea94db2006-06-12 22:16:13 -07004668 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4669
Michael Chan02537b062007-06-04 21:24:07 -07004670 /* workaround occasional corrupted counters */
4671 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4672 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4673 BNX2_HC_COMMAND_STATS_NOW);
4674
Michael Chanf8dd0642006-11-19 14:08:29 -08004675 if (bp->phy_flags & PHY_SERDES_FLAG) {
4676 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4677 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004678 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004679 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004680 }
4681
4682bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004683 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004684}
4685
Michael Chan8e6a72c2007-05-03 13:24:48 -07004686static int
4687bnx2_request_irq(struct bnx2 *bp)
4688{
4689 struct net_device *dev = bp->dev;
4690 int rc = 0;
4691
4692 if (bp->flags & USING_MSI_FLAG) {
4693 irq_handler_t fn = bnx2_msi;
4694
4695 if (bp->flags & ONE_SHOT_MSI_FLAG)
4696 fn = bnx2_msi_1shot;
4697
4698 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4699 } else
4700 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4701 IRQF_SHARED, dev->name, dev);
4702 return rc;
4703}
4704
4705static void
4706bnx2_free_irq(struct bnx2 *bp)
4707{
4708 struct net_device *dev = bp->dev;
4709
4710 if (bp->flags & USING_MSI_FLAG) {
4711 free_irq(bp->pdev->irq, dev);
4712 pci_disable_msi(bp->pdev);
4713 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4714 } else
4715 free_irq(bp->pdev->irq, dev);
4716}
4717
Michael Chanb6016b72005-05-26 13:03:09 -07004718/* Called with rtnl_lock */
4719static int
4720bnx2_open(struct net_device *dev)
4721{
Michael Chan972ec0d2006-01-23 16:12:43 -08004722 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004723 int rc;
4724
Michael Chan1b2f9222007-05-03 13:20:19 -07004725 netif_carrier_off(dev);
4726
Pavel Machek829ca9a2005-09-03 15:56:56 -07004727 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004728 bnx2_disable_int(bp);
4729
4730 rc = bnx2_alloc_mem(bp);
4731 if (rc)
4732 return rc;
4733
Michael Chan8e6a72c2007-05-03 13:24:48 -07004734 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07004735 if (pci_enable_msi(bp->pdev) == 0) {
4736 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07004737 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4738 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07004739 }
4740 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07004741 rc = bnx2_request_irq(bp);
4742
Michael Chanb6016b72005-05-26 13:03:09 -07004743 if (rc) {
4744 bnx2_free_mem(bp);
4745 return rc;
4746 }
4747
4748 rc = bnx2_init_nic(bp);
4749
4750 if (rc) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004751 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004752 bnx2_free_skbs(bp);
4753 bnx2_free_mem(bp);
4754 return rc;
4755 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004756
Michael Chancd339a02005-08-25 15:35:24 -07004757 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004758
4759 atomic_set(&bp->intr_sem, 0);
4760
4761 bnx2_enable_int(bp);
4762
4763 if (bp->flags & USING_MSI_FLAG) {
4764 /* Test MSI to make sure it is working
4765 * If MSI test fails, go back to INTx mode
4766 */
4767 if (bnx2_test_intr(bp) != 0) {
4768 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4769 " using MSI, switching to INTx mode. Please"
4770 " report this failure to the PCI maintainer"
4771 " and include system chipset information.\n",
4772 bp->dev->name);
4773
4774 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07004775 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004776
4777 rc = bnx2_init_nic(bp);
4778
Michael Chan8e6a72c2007-05-03 13:24:48 -07004779 if (!rc)
4780 rc = bnx2_request_irq(bp);
4781
Michael Chanb6016b72005-05-26 13:03:09 -07004782 if (rc) {
4783 bnx2_free_skbs(bp);
4784 bnx2_free_mem(bp);
4785 del_timer_sync(&bp->timer);
4786 return rc;
4787 }
4788 bnx2_enable_int(bp);
4789 }
4790 }
4791 if (bp->flags & USING_MSI_FLAG) {
4792 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4793 }
4794
4795 netif_start_queue(dev);
4796
4797 return 0;
4798}
4799
4800static void
David Howellsc4028952006-11-22 14:57:56 +00004801bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004802{
David Howellsc4028952006-11-22 14:57:56 +00004803 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004804
Michael Chanafdc08b2005-08-25 15:34:29 -07004805 if (!netif_running(bp->dev))
4806 return;
4807
4808 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004809 bnx2_netif_stop(bp);
4810
4811 bnx2_init_nic(bp);
4812
4813 atomic_set(&bp->intr_sem, 1);
4814 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004815 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004816}
4817
4818static void
4819bnx2_tx_timeout(struct net_device *dev)
4820{
Michael Chan972ec0d2006-01-23 16:12:43 -08004821 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004822
4823 /* This allows the netif to be shutdown gracefully before resetting */
4824 schedule_work(&bp->reset_task);
4825}
4826
4827#ifdef BCM_VLAN
4828/* Called with rtnl_lock */
4829static void
4830bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4831{
Michael Chan972ec0d2006-01-23 16:12:43 -08004832 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004833
4834 bnx2_netif_stop(bp);
4835
4836 bp->vlgrp = vlgrp;
4837 bnx2_set_rx_mode(dev);
4838
4839 bnx2_netif_start(bp);
4840}
Michael Chanb6016b72005-05-26 13:03:09 -07004841#endif
4842
Herbert Xu932ff272006-06-09 12:20:56 -07004843/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004844 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4845 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004846 */
4847static int
4848bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4849{
Michael Chan972ec0d2006-01-23 16:12:43 -08004850 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004851 dma_addr_t mapping;
4852 struct tx_bd *txbd;
4853 struct sw_bd *tx_buf;
4854 u32 len, vlan_tag_flags, last_frag, mss;
4855 u16 prod, ring_prod;
4856 int i;
4857
Michael Chane89bbf12005-08-25 15:36:58 -07004858 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004859 netif_stop_queue(dev);
4860 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4861 dev->name);
4862
4863 return NETDEV_TX_BUSY;
4864 }
4865 len = skb_headlen(skb);
4866 prod = bp->tx_prod;
4867 ring_prod = TX_RING_IDX(prod);
4868
4869 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004870 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004871 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4872 }
4873
4874 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4875 vlan_tag_flags |=
4876 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4877 }
Michael Chanfde82052007-05-03 17:23:35 -07004878 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07004879 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004880 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004881
Michael Chanb6016b72005-05-26 13:03:09 -07004882 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4883
Michael Chan4666f872007-05-03 13:22:28 -07004884 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004885
Michael Chan4666f872007-05-03 13:22:28 -07004886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4887 u32 tcp_off = skb_transport_offset(skb) -
4888 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07004889
Michael Chan4666f872007-05-03 13:22:28 -07004890 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4891 TX_BD_FLAGS_SW_FLAGS;
4892 if (likely(tcp_off == 0))
4893 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4894 else {
4895 tcp_off >>= 3;
4896 vlan_tag_flags |= ((tcp_off & 0x3) <<
4897 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4898 ((tcp_off & 0x10) <<
4899 TX_BD_FLAGS_TCP6_OFF4_SHL);
4900 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4901 }
4902 } else {
4903 if (skb_header_cloned(skb) &&
4904 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4905 dev_kfree_skb(skb);
4906 return NETDEV_TX_OK;
4907 }
4908
4909 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4910
4911 iph = ip_hdr(skb);
4912 iph->check = 0;
4913 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4914 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4915 iph->daddr, 0,
4916 IPPROTO_TCP,
4917 0);
4918 if (tcp_opt_len || (iph->ihl > 5)) {
4919 vlan_tag_flags |= ((iph->ihl - 5) +
4920 (tcp_opt_len >> 2)) << 8;
4921 }
Michael Chanb6016b72005-05-26 13:03:09 -07004922 }
Michael Chan4666f872007-05-03 13:22:28 -07004923 } else
Michael Chanb6016b72005-05-26 13:03:09 -07004924 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004925
4926 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004927
Michael Chanb6016b72005-05-26 13:03:09 -07004928 tx_buf = &bp->tx_buf_ring[ring_prod];
4929 tx_buf->skb = skb;
4930 pci_unmap_addr_set(tx_buf, mapping, mapping);
4931
4932 txbd = &bp->tx_desc_ring[ring_prod];
4933
4934 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4935 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4936 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4937 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4938
4939 last_frag = skb_shinfo(skb)->nr_frags;
4940
4941 for (i = 0; i < last_frag; i++) {
4942 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4943
4944 prod = NEXT_TX_BD(prod);
4945 ring_prod = TX_RING_IDX(prod);
4946 txbd = &bp->tx_desc_ring[ring_prod];
4947
4948 len = frag->size;
4949 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4950 len, PCI_DMA_TODEVICE);
4951 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4952 mapping, mapping);
4953
4954 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4955 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4956 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4957 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4958
4959 }
4960 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4961
4962 prod = NEXT_TX_BD(prod);
4963 bp->tx_prod_bseq += skb->len;
4964
Michael Chan234754d2006-11-19 14:11:41 -08004965 REG_WR16(bp, bp->tx_bidx_addr, prod);
4966 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004967
4968 mmiowb();
4969
4970 bp->tx_prod = prod;
4971 dev->trans_start = jiffies;
4972
Michael Chane89bbf12005-08-25 15:36:58 -07004973 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004974 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004975 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004976 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004977 }
4978
4979 return NETDEV_TX_OK;
4980}
4981
4982/* Called with rtnl_lock */
4983static int
4984bnx2_close(struct net_device *dev)
4985{
Michael Chan972ec0d2006-01-23 16:12:43 -08004986 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004987 u32 reset_code;
4988
Michael Chanafdc08b2005-08-25 15:34:29 -07004989 /* Calling flush_scheduled_work() may deadlock because
4990 * linkwatch_event() may be on the workqueue and it will try to get
4991 * the rtnl_lock which we are holding.
4992 */
4993 while (bp->in_reset_task)
4994 msleep(1);
4995
Michael Chanb6016b72005-05-26 13:03:09 -07004996 bnx2_netif_stop(bp);
4997 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08004998 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07004999 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005000 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005001 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5002 else
5003 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5004 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005005 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005006 bnx2_free_skbs(bp);
5007 bnx2_free_mem(bp);
5008 bp->link_up = 0;
5009 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005010 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005011 return 0;
5012}
5013
5014#define GET_NET_STATS64(ctr) \
5015 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5016 (unsigned long) (ctr##_lo)
5017
5018#define GET_NET_STATS32(ctr) \
5019 (ctr##_lo)
5020
5021#if (BITS_PER_LONG == 64)
5022#define GET_NET_STATS GET_NET_STATS64
5023#else
5024#define GET_NET_STATS GET_NET_STATS32
5025#endif
5026
5027static struct net_device_stats *
5028bnx2_get_stats(struct net_device *dev)
5029{
Michael Chan972ec0d2006-01-23 16:12:43 -08005030 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005031 struct statistics_block *stats_blk = bp->stats_blk;
5032 struct net_device_stats *net_stats = &bp->net_stats;
5033
5034 if (bp->stats_blk == NULL) {
5035 return net_stats;
5036 }
5037 net_stats->rx_packets =
5038 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5039 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5040 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5041
5042 net_stats->tx_packets =
5043 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5044 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5045 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5046
5047 net_stats->rx_bytes =
5048 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5049
5050 net_stats->tx_bytes =
5051 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5052
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005053 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005054 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5055
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005056 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005057 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5058
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005059 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005060 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5061 stats_blk->stat_EtherStatsOverrsizePkts);
5062
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005063 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005064 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5065
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005066 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005067 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5068
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005069 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005070 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5071
5072 net_stats->rx_errors = net_stats->rx_length_errors +
5073 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5074 net_stats->rx_crc_errors;
5075
5076 net_stats->tx_aborted_errors =
5077 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5078 stats_blk->stat_Dot3StatsLateCollisions);
5079
Michael Chan5b0c76a2005-11-04 08:45:49 -08005080 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5081 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005082 net_stats->tx_carrier_errors = 0;
5083 else {
5084 net_stats->tx_carrier_errors =
5085 (unsigned long)
5086 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5087 }
5088
5089 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005090 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005091 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5092 +
5093 net_stats->tx_aborted_errors +
5094 net_stats->tx_carrier_errors;
5095
Michael Chancea94db2006-06-12 22:16:13 -07005096 net_stats->rx_missed_errors =
5097 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5098 stats_blk->stat_FwRxDrop);
5099
Michael Chanb6016b72005-05-26 13:03:09 -07005100 return net_stats;
5101}
5102
5103/* All ethtool functions called with rtnl_lock */
5104
5105static int
5106bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5107{
Michael Chan972ec0d2006-01-23 16:12:43 -08005108 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005109
5110 cmd->supported = SUPPORTED_Autoneg;
5111 if (bp->phy_flags & PHY_SERDES_FLAG) {
5112 cmd->supported |= SUPPORTED_1000baseT_Full |
5113 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005114 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5115 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005116
5117 cmd->port = PORT_FIBRE;
5118 }
5119 else {
5120 cmd->supported |= SUPPORTED_10baseT_Half |
5121 SUPPORTED_10baseT_Full |
5122 SUPPORTED_100baseT_Half |
5123 SUPPORTED_100baseT_Full |
5124 SUPPORTED_1000baseT_Full |
5125 SUPPORTED_TP;
5126
5127 cmd->port = PORT_TP;
5128 }
5129
5130 cmd->advertising = bp->advertising;
5131
5132 if (bp->autoneg & AUTONEG_SPEED) {
5133 cmd->autoneg = AUTONEG_ENABLE;
5134 }
5135 else {
5136 cmd->autoneg = AUTONEG_DISABLE;
5137 }
5138
5139 if (netif_carrier_ok(dev)) {
5140 cmd->speed = bp->line_speed;
5141 cmd->duplex = bp->duplex;
5142 }
5143 else {
5144 cmd->speed = -1;
5145 cmd->duplex = -1;
5146 }
5147
5148 cmd->transceiver = XCVR_INTERNAL;
5149 cmd->phy_address = bp->phy_addr;
5150
5151 return 0;
5152}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005153
Michael Chanb6016b72005-05-26 13:03:09 -07005154static int
5155bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5156{
Michael Chan972ec0d2006-01-23 16:12:43 -08005157 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005158 u8 autoneg = bp->autoneg;
5159 u8 req_duplex = bp->req_duplex;
5160 u16 req_line_speed = bp->req_line_speed;
5161 u32 advertising = bp->advertising;
5162
5163 if (cmd->autoneg == AUTONEG_ENABLE) {
5164 autoneg |= AUTONEG_SPEED;
5165
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005166 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005167
5168 /* allow advertising 1 speed */
5169 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5170 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5171 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5172 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5173
5174 if (bp->phy_flags & PHY_SERDES_FLAG)
5175 return -EINVAL;
5176
5177 advertising = cmd->advertising;
5178
Michael Chan27a005b2007-05-03 13:23:41 -07005179 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5180 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5181 return -EINVAL;
5182 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005183 advertising = cmd->advertising;
5184 }
5185 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5186 return -EINVAL;
5187 }
5188 else {
5189 if (bp->phy_flags & PHY_SERDES_FLAG) {
5190 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5191 }
5192 else {
5193 advertising = ETHTOOL_ALL_COPPER_SPEED;
5194 }
5195 }
5196 advertising |= ADVERTISED_Autoneg;
5197 }
5198 else {
5199 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005200 if ((cmd->speed != SPEED_1000 &&
5201 cmd->speed != SPEED_2500) ||
5202 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005203 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005204
5205 if (cmd->speed == SPEED_2500 &&
5206 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5207 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005208 }
5209 else if (cmd->speed == SPEED_1000) {
5210 return -EINVAL;
5211 }
5212 autoneg &= ~AUTONEG_SPEED;
5213 req_line_speed = cmd->speed;
5214 req_duplex = cmd->duplex;
5215 advertising = 0;
5216 }
5217
5218 bp->autoneg = autoneg;
5219 bp->advertising = advertising;
5220 bp->req_line_speed = req_line_speed;
5221 bp->req_duplex = req_duplex;
5222
Michael Chanc770a652005-08-25 15:38:39 -07005223 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005224
5225 bnx2_setup_phy(bp);
5226
Michael Chanc770a652005-08-25 15:38:39 -07005227 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005228
5229 return 0;
5230}
5231
5232static void
5233bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5234{
Michael Chan972ec0d2006-01-23 16:12:43 -08005235 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005236
5237 strcpy(info->driver, DRV_MODULE_NAME);
5238 strcpy(info->version, DRV_MODULE_VERSION);
5239 strcpy(info->bus_info, pci_name(bp->pdev));
5240 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5241 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5242 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005243 info->fw_version[1] = info->fw_version[3] = '.';
5244 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005245}
5246
Michael Chan244ac4f2006-03-20 17:48:46 -08005247#define BNX2_REGDUMP_LEN (32 * 1024)
5248
5249static int
5250bnx2_get_regs_len(struct net_device *dev)
5251{
5252 return BNX2_REGDUMP_LEN;
5253}
5254
5255static void
5256bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5257{
5258 u32 *p = _p, i, offset;
5259 u8 *orig_p = _p;
5260 struct bnx2 *bp = netdev_priv(dev);
5261 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5262 0x0800, 0x0880, 0x0c00, 0x0c10,
5263 0x0c30, 0x0d08, 0x1000, 0x101c,
5264 0x1040, 0x1048, 0x1080, 0x10a4,
5265 0x1400, 0x1490, 0x1498, 0x14f0,
5266 0x1500, 0x155c, 0x1580, 0x15dc,
5267 0x1600, 0x1658, 0x1680, 0x16d8,
5268 0x1800, 0x1820, 0x1840, 0x1854,
5269 0x1880, 0x1894, 0x1900, 0x1984,
5270 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5271 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5272 0x2000, 0x2030, 0x23c0, 0x2400,
5273 0x2800, 0x2820, 0x2830, 0x2850,
5274 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5275 0x3c00, 0x3c94, 0x4000, 0x4010,
5276 0x4080, 0x4090, 0x43c0, 0x4458,
5277 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5278 0x4fc0, 0x5010, 0x53c0, 0x5444,
5279 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5280 0x5fc0, 0x6000, 0x6400, 0x6428,
5281 0x6800, 0x6848, 0x684c, 0x6860,
5282 0x6888, 0x6910, 0x8000 };
5283
5284 regs->version = 0;
5285
5286 memset(p, 0, BNX2_REGDUMP_LEN);
5287
5288 if (!netif_running(bp->dev))
5289 return;
5290
5291 i = 0;
5292 offset = reg_boundaries[0];
5293 p += offset;
5294 while (offset < BNX2_REGDUMP_LEN) {
5295 *p++ = REG_RD(bp, offset);
5296 offset += 4;
5297 if (offset == reg_boundaries[i + 1]) {
5298 offset = reg_boundaries[i + 2];
5299 p = (u32 *) (orig_p + offset);
5300 i += 2;
5301 }
5302 }
5303}
5304
Michael Chanb6016b72005-05-26 13:03:09 -07005305static void
5306bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5307{
Michael Chan972ec0d2006-01-23 16:12:43 -08005308 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005309
5310 if (bp->flags & NO_WOL_FLAG) {
5311 wol->supported = 0;
5312 wol->wolopts = 0;
5313 }
5314 else {
5315 wol->supported = WAKE_MAGIC;
5316 if (bp->wol)
5317 wol->wolopts = WAKE_MAGIC;
5318 else
5319 wol->wolopts = 0;
5320 }
5321 memset(&wol->sopass, 0, sizeof(wol->sopass));
5322}
5323
5324static int
5325bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5326{
Michael Chan972ec0d2006-01-23 16:12:43 -08005327 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005328
5329 if (wol->wolopts & ~WAKE_MAGIC)
5330 return -EINVAL;
5331
5332 if (wol->wolopts & WAKE_MAGIC) {
5333 if (bp->flags & NO_WOL_FLAG)
5334 return -EINVAL;
5335
5336 bp->wol = 1;
5337 }
5338 else {
5339 bp->wol = 0;
5340 }
5341 return 0;
5342}
5343
5344static int
5345bnx2_nway_reset(struct net_device *dev)
5346{
Michael Chan972ec0d2006-01-23 16:12:43 -08005347 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005348 u32 bmcr;
5349
5350 if (!(bp->autoneg & AUTONEG_SPEED)) {
5351 return -EINVAL;
5352 }
5353
Michael Chanc770a652005-08-25 15:38:39 -07005354 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005355
5356 /* Force a link down visible on the other side */
5357 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005358 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005359 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005360
5361 msleep(20);
5362
Michael Chanc770a652005-08-25 15:38:39 -07005363 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005364
5365 bp->current_interval = SERDES_AN_TIMEOUT;
5366 bp->serdes_an_pending = 1;
5367 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005368 }
5369
Michael Chanca58c3a2007-05-03 13:22:52 -07005370 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005371 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005372 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005373
Michael Chanc770a652005-08-25 15:38:39 -07005374 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005375
5376 return 0;
5377}
5378
5379static int
5380bnx2_get_eeprom_len(struct net_device *dev)
5381{
Michael Chan972ec0d2006-01-23 16:12:43 -08005382 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005383
Michael Chan1122db72006-01-23 16:11:42 -08005384 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005385 return 0;
5386
Michael Chan1122db72006-01-23 16:11:42 -08005387 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005388}
5389
5390static int
5391bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5392 u8 *eebuf)
5393{
Michael Chan972ec0d2006-01-23 16:12:43 -08005394 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005395 int rc;
5396
John W. Linville1064e942005-11-10 12:58:24 -08005397 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005398
5399 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5400
5401 return rc;
5402}
5403
5404static int
5405bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5406 u8 *eebuf)
5407{
Michael Chan972ec0d2006-01-23 16:12:43 -08005408 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005409 int rc;
5410
John W. Linville1064e942005-11-10 12:58:24 -08005411 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005412
5413 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5414
5415 return rc;
5416}
5417
5418static int
5419bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5420{
Michael Chan972ec0d2006-01-23 16:12:43 -08005421 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005422
5423 memset(coal, 0, sizeof(struct ethtool_coalesce));
5424
5425 coal->rx_coalesce_usecs = bp->rx_ticks;
5426 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5427 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5428 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5429
5430 coal->tx_coalesce_usecs = bp->tx_ticks;
5431 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5432 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5433 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5434
5435 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5436
5437 return 0;
5438}
5439
5440static int
5441bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5442{
Michael Chan972ec0d2006-01-23 16:12:43 -08005443 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005444
5445 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5446 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5447
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005448 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005449 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5450
5451 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5452 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5453
5454 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5455 if (bp->rx_quick_cons_trip_int > 0xff)
5456 bp->rx_quick_cons_trip_int = 0xff;
5457
5458 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5459 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5460
5461 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5462 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5463
5464 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5465 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5466
5467 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5468 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5469 0xff;
5470
5471 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07005472 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5473 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5474 bp->stats_ticks = USEC_PER_SEC;
5475 }
Michael Chanb6016b72005-05-26 13:03:09 -07005476 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5477 bp->stats_ticks &= 0xffff00;
5478
5479 if (netif_running(bp->dev)) {
5480 bnx2_netif_stop(bp);
5481 bnx2_init_nic(bp);
5482 bnx2_netif_start(bp);
5483 }
5484
5485 return 0;
5486}
5487
5488static void
5489bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5490{
Michael Chan972ec0d2006-01-23 16:12:43 -08005491 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005492
Michael Chan13daffa2006-03-20 17:49:20 -08005493 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005494 ering->rx_mini_max_pending = 0;
5495 ering->rx_jumbo_max_pending = 0;
5496
5497 ering->rx_pending = bp->rx_ring_size;
5498 ering->rx_mini_pending = 0;
5499 ering->rx_jumbo_pending = 0;
5500
5501 ering->tx_max_pending = MAX_TX_DESC_CNT;
5502 ering->tx_pending = bp->tx_ring_size;
5503}
5504
5505static int
5506bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5507{
Michael Chan972ec0d2006-01-23 16:12:43 -08005508 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005509
Michael Chan13daffa2006-03-20 17:49:20 -08005510 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005511 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5512 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5513
5514 return -EINVAL;
5515 }
Michael Chan13daffa2006-03-20 17:49:20 -08005516 if (netif_running(bp->dev)) {
5517 bnx2_netif_stop(bp);
5518 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5519 bnx2_free_skbs(bp);
5520 bnx2_free_mem(bp);
5521 }
5522
5523 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005524 bp->tx_ring_size = ering->tx_pending;
5525
5526 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005527 int rc;
5528
5529 rc = bnx2_alloc_mem(bp);
5530 if (rc)
5531 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005532 bnx2_init_nic(bp);
5533 bnx2_netif_start(bp);
5534 }
5535
5536 return 0;
5537}
5538
5539static void
5540bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5541{
Michael Chan972ec0d2006-01-23 16:12:43 -08005542 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005543
5544 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5545 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5546 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5547}
5548
5549static int
5550bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5551{
Michael Chan972ec0d2006-01-23 16:12:43 -08005552 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005553
5554 bp->req_flow_ctrl = 0;
5555 if (epause->rx_pause)
5556 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5557 if (epause->tx_pause)
5558 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5559
5560 if (epause->autoneg) {
5561 bp->autoneg |= AUTONEG_FLOW_CTRL;
5562 }
5563 else {
5564 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5565 }
5566
Michael Chanc770a652005-08-25 15:38:39 -07005567 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005568
5569 bnx2_setup_phy(bp);
5570
Michael Chanc770a652005-08-25 15:38:39 -07005571 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005572
5573 return 0;
5574}
5575
5576static u32
5577bnx2_get_rx_csum(struct net_device *dev)
5578{
Michael Chan972ec0d2006-01-23 16:12:43 -08005579 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005580
5581 return bp->rx_csum;
5582}
5583
5584static int
5585bnx2_set_rx_csum(struct net_device *dev, u32 data)
5586{
Michael Chan972ec0d2006-01-23 16:12:43 -08005587 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005588
5589 bp->rx_csum = data;
5590 return 0;
5591}
5592
Michael Chanb11d6212006-06-29 12:31:21 -07005593static int
5594bnx2_set_tso(struct net_device *dev, u32 data)
5595{
Michael Chan4666f872007-05-03 13:22:28 -07005596 struct bnx2 *bp = netdev_priv(dev);
5597
5598 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005599 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005600 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5601 dev->features |= NETIF_F_TSO6;
5602 } else
5603 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5604 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005605 return 0;
5606}
5607
Michael Chancea94db2006-06-12 22:16:13 -07005608#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005609
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005610static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005611 char string[ETH_GSTRING_LEN];
5612} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5613 { "rx_bytes" },
5614 { "rx_error_bytes" },
5615 { "tx_bytes" },
5616 { "tx_error_bytes" },
5617 { "rx_ucast_packets" },
5618 { "rx_mcast_packets" },
5619 { "rx_bcast_packets" },
5620 { "tx_ucast_packets" },
5621 { "tx_mcast_packets" },
5622 { "tx_bcast_packets" },
5623 { "tx_mac_errors" },
5624 { "tx_carrier_errors" },
5625 { "rx_crc_errors" },
5626 { "rx_align_errors" },
5627 { "tx_single_collisions" },
5628 { "tx_multi_collisions" },
5629 { "tx_deferred" },
5630 { "tx_excess_collisions" },
5631 { "tx_late_collisions" },
5632 { "tx_total_collisions" },
5633 { "rx_fragments" },
5634 { "rx_jabbers" },
5635 { "rx_undersize_packets" },
5636 { "rx_oversize_packets" },
5637 { "rx_64_byte_packets" },
5638 { "rx_65_to_127_byte_packets" },
5639 { "rx_128_to_255_byte_packets" },
5640 { "rx_256_to_511_byte_packets" },
5641 { "rx_512_to_1023_byte_packets" },
5642 { "rx_1024_to_1522_byte_packets" },
5643 { "rx_1523_to_9022_byte_packets" },
5644 { "tx_64_byte_packets" },
5645 { "tx_65_to_127_byte_packets" },
5646 { "tx_128_to_255_byte_packets" },
5647 { "tx_256_to_511_byte_packets" },
5648 { "tx_512_to_1023_byte_packets" },
5649 { "tx_1024_to_1522_byte_packets" },
5650 { "tx_1523_to_9022_byte_packets" },
5651 { "rx_xon_frames" },
5652 { "rx_xoff_frames" },
5653 { "tx_xon_frames" },
5654 { "tx_xoff_frames" },
5655 { "rx_mac_ctrl_frames" },
5656 { "rx_filtered_packets" },
5657 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005658 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005659};
5660
5661#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5662
Arjan van de Venf71e1302006-03-03 21:33:57 -05005663static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005664 STATS_OFFSET32(stat_IfHCInOctets_hi),
5665 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5666 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5667 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5668 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5669 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5670 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5671 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5672 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5673 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5674 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005675 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5676 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5677 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5678 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5679 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5680 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5681 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5682 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5683 STATS_OFFSET32(stat_EtherStatsCollisions),
5684 STATS_OFFSET32(stat_EtherStatsFragments),
5685 STATS_OFFSET32(stat_EtherStatsJabbers),
5686 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5687 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5688 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5689 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5690 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5691 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5692 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5693 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5694 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5695 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5696 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5697 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5698 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5699 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5700 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5701 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5702 STATS_OFFSET32(stat_XonPauseFramesReceived),
5703 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5704 STATS_OFFSET32(stat_OutXonSent),
5705 STATS_OFFSET32(stat_OutXoffSent),
5706 STATS_OFFSET32(stat_MacControlFramesReceived),
5707 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5708 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005709 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005710};
5711
5712/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5713 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005714 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005715static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005716 8,0,8,8,8,8,8,8,8,8,
5717 4,0,4,4,4,4,4,4,4,4,
5718 4,4,4,4,4,4,4,4,4,4,
5719 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005720 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005721};
5722
Michael Chan5b0c76a2005-11-04 08:45:49 -08005723static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5724 8,0,8,8,8,8,8,8,8,8,
5725 4,4,4,4,4,4,4,4,4,4,
5726 4,4,4,4,4,4,4,4,4,4,
5727 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005728 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005729};
5730
Michael Chanb6016b72005-05-26 13:03:09 -07005731#define BNX2_NUM_TESTS 6
5732
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005733static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005734 char string[ETH_GSTRING_LEN];
5735} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5736 { "register_test (offline)" },
5737 { "memory_test (offline)" },
5738 { "loopback_test (offline)" },
5739 { "nvram_test (online)" },
5740 { "interrupt_test (online)" },
5741 { "link_test (online)" },
5742};
5743
5744static int
5745bnx2_self_test_count(struct net_device *dev)
5746{
5747 return BNX2_NUM_TESTS;
5748}
5749
5750static void
5751bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5752{
Michael Chan972ec0d2006-01-23 16:12:43 -08005753 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005754
5755 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5756 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08005757 int i;
5758
Michael Chanb6016b72005-05-26 13:03:09 -07005759 bnx2_netif_stop(bp);
5760 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5761 bnx2_free_skbs(bp);
5762
5763 if (bnx2_test_registers(bp) != 0) {
5764 buf[0] = 1;
5765 etest->flags |= ETH_TEST_FL_FAILED;
5766 }
5767 if (bnx2_test_memory(bp) != 0) {
5768 buf[1] = 1;
5769 etest->flags |= ETH_TEST_FL_FAILED;
5770 }
Michael Chanbc5a0692006-01-23 16:13:22 -08005771 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07005772 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07005773
5774 if (!netif_running(bp->dev)) {
5775 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5776 }
5777 else {
5778 bnx2_init_nic(bp);
5779 bnx2_netif_start(bp);
5780 }
5781
5782 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08005783 for (i = 0; i < 7; i++) {
5784 if (bp->link_up)
5785 break;
5786 msleep_interruptible(1000);
5787 }
Michael Chanb6016b72005-05-26 13:03:09 -07005788 }
5789
5790 if (bnx2_test_nvram(bp) != 0) {
5791 buf[3] = 1;
5792 etest->flags |= ETH_TEST_FL_FAILED;
5793 }
5794 if (bnx2_test_intr(bp) != 0) {
5795 buf[4] = 1;
5796 etest->flags |= ETH_TEST_FL_FAILED;
5797 }
5798
5799 if (bnx2_test_link(bp) != 0) {
5800 buf[5] = 1;
5801 etest->flags |= ETH_TEST_FL_FAILED;
5802
5803 }
5804}
5805
5806static void
5807bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5808{
5809 switch (stringset) {
5810 case ETH_SS_STATS:
5811 memcpy(buf, bnx2_stats_str_arr,
5812 sizeof(bnx2_stats_str_arr));
5813 break;
5814 case ETH_SS_TEST:
5815 memcpy(buf, bnx2_tests_str_arr,
5816 sizeof(bnx2_tests_str_arr));
5817 break;
5818 }
5819}
5820
5821static int
5822bnx2_get_stats_count(struct net_device *dev)
5823{
5824 return BNX2_NUM_STATS;
5825}
5826
5827static void
5828bnx2_get_ethtool_stats(struct net_device *dev,
5829 struct ethtool_stats *stats, u64 *buf)
5830{
Michael Chan972ec0d2006-01-23 16:12:43 -08005831 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005832 int i;
5833 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005834 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005835
5836 if (hw_stats == NULL) {
5837 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5838 return;
5839 }
5840
Michael Chan5b0c76a2005-11-04 08:45:49 -08005841 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5842 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5843 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5844 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005845 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005846 else
5847 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005848
5849 for (i = 0; i < BNX2_NUM_STATS; i++) {
5850 if (stats_len_arr[i] == 0) {
5851 /* skip this counter */
5852 buf[i] = 0;
5853 continue;
5854 }
5855 if (stats_len_arr[i] == 4) {
5856 /* 4-byte counter */
5857 buf[i] = (u64)
5858 *(hw_stats + bnx2_stats_offset_arr[i]);
5859 continue;
5860 }
5861 /* 8-byte counter */
5862 buf[i] = (((u64) *(hw_stats +
5863 bnx2_stats_offset_arr[i])) << 32) +
5864 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5865 }
5866}
5867
5868static int
5869bnx2_phys_id(struct net_device *dev, u32 data)
5870{
Michael Chan972ec0d2006-01-23 16:12:43 -08005871 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005872 int i;
5873 u32 save;
5874
5875 if (data == 0)
5876 data = 2;
5877
5878 save = REG_RD(bp, BNX2_MISC_CFG);
5879 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5880
5881 for (i = 0; i < (data * 2); i++) {
5882 if ((i % 2) == 0) {
5883 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5884 }
5885 else {
5886 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5887 BNX2_EMAC_LED_1000MB_OVERRIDE |
5888 BNX2_EMAC_LED_100MB_OVERRIDE |
5889 BNX2_EMAC_LED_10MB_OVERRIDE |
5890 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5891 BNX2_EMAC_LED_TRAFFIC);
5892 }
5893 msleep_interruptible(500);
5894 if (signal_pending(current))
5895 break;
5896 }
5897 REG_WR(bp, BNX2_EMAC_LED, 0);
5898 REG_WR(bp, BNX2_MISC_CFG, save);
5899 return 0;
5900}
5901
Michael Chan4666f872007-05-03 13:22:28 -07005902static int
5903bnx2_set_tx_csum(struct net_device *dev, u32 data)
5904{
5905 struct bnx2 *bp = netdev_priv(dev);
5906
5907 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5908 return (ethtool_op_set_tx_hw_csum(dev, data));
5909 else
5910 return (ethtool_op_set_tx_csum(dev, data));
5911}
5912
Jeff Garzik7282d492006-09-13 14:30:00 -04005913static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005914 .get_settings = bnx2_get_settings,
5915 .set_settings = bnx2_set_settings,
5916 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005917 .get_regs_len = bnx2_get_regs_len,
5918 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005919 .get_wol = bnx2_get_wol,
5920 .set_wol = bnx2_set_wol,
5921 .nway_reset = bnx2_nway_reset,
5922 .get_link = ethtool_op_get_link,
5923 .get_eeprom_len = bnx2_get_eeprom_len,
5924 .get_eeprom = bnx2_get_eeprom,
5925 .set_eeprom = bnx2_set_eeprom,
5926 .get_coalesce = bnx2_get_coalesce,
5927 .set_coalesce = bnx2_set_coalesce,
5928 .get_ringparam = bnx2_get_ringparam,
5929 .set_ringparam = bnx2_set_ringparam,
5930 .get_pauseparam = bnx2_get_pauseparam,
5931 .set_pauseparam = bnx2_set_pauseparam,
5932 .get_rx_csum = bnx2_get_rx_csum,
5933 .set_rx_csum = bnx2_set_rx_csum,
5934 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005935 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005936 .get_sg = ethtool_op_get_sg,
5937 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005938 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005939 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005940 .self_test_count = bnx2_self_test_count,
5941 .self_test = bnx2_self_test,
5942 .get_strings = bnx2_get_strings,
5943 .phys_id = bnx2_phys_id,
5944 .get_stats_count = bnx2_get_stats_count,
5945 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005946 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005947};
5948
5949/* Called with rtnl_lock */
5950static int
5951bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5952{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005953 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005954 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005955 int err;
5956
5957 switch(cmd) {
5958 case SIOCGMIIPHY:
5959 data->phy_id = bp->phy_addr;
5960
5961 /* fallthru */
5962 case SIOCGMIIREG: {
5963 u32 mii_regval;
5964
Michael Chandad3e452007-05-03 13:18:03 -07005965 if (!netif_running(dev))
5966 return -EAGAIN;
5967
Michael Chanc770a652005-08-25 15:38:39 -07005968 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005969 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005970 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005971
5972 data->val_out = mii_regval;
5973
5974 return err;
5975 }
5976
5977 case SIOCSMIIREG:
5978 if (!capable(CAP_NET_ADMIN))
5979 return -EPERM;
5980
Michael Chandad3e452007-05-03 13:18:03 -07005981 if (!netif_running(dev))
5982 return -EAGAIN;
5983
Michael Chanc770a652005-08-25 15:38:39 -07005984 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005985 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005986 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005987
5988 return err;
5989
5990 default:
5991 /* do nothing */
5992 break;
5993 }
5994 return -EOPNOTSUPP;
5995}
5996
5997/* Called with rtnl_lock */
5998static int
5999bnx2_change_mac_addr(struct net_device *dev, void *p)
6000{
6001 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006002 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006003
Michael Chan73eef4c2005-08-25 15:39:15 -07006004 if (!is_valid_ether_addr(addr->sa_data))
6005 return -EINVAL;
6006
Michael Chanb6016b72005-05-26 13:03:09 -07006007 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6008 if (netif_running(dev))
6009 bnx2_set_mac_addr(bp);
6010
6011 return 0;
6012}
6013
6014/* Called with rtnl_lock */
6015static int
6016bnx2_change_mtu(struct net_device *dev, int new_mtu)
6017{
Michael Chan972ec0d2006-01-23 16:12:43 -08006018 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006019
6020 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6021 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6022 return -EINVAL;
6023
6024 dev->mtu = new_mtu;
6025 if (netif_running(dev)) {
6026 bnx2_netif_stop(bp);
6027
6028 bnx2_init_nic(bp);
6029
6030 bnx2_netif_start(bp);
6031 }
6032 return 0;
6033}
6034
6035#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6036static void
6037poll_bnx2(struct net_device *dev)
6038{
Michael Chan972ec0d2006-01-23 16:12:43 -08006039 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006040
6041 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006042 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006043 enable_irq(bp->pdev->irq);
6044}
6045#endif
6046
Michael Chan253c8b72007-01-08 19:56:01 -08006047static void __devinit
6048bnx2_get_5709_media(struct bnx2 *bp)
6049{
6050 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6051 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6052 u32 strap;
6053
6054 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6055 return;
6056 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6057 bp->phy_flags |= PHY_SERDES_FLAG;
6058 return;
6059 }
6060
6061 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6062 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6063 else
6064 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6065
6066 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6067 switch (strap) {
6068 case 0x4:
6069 case 0x5:
6070 case 0x6:
6071 bp->phy_flags |= PHY_SERDES_FLAG;
6072 return;
6073 }
6074 } else {
6075 switch (strap) {
6076 case 0x1:
6077 case 0x2:
6078 case 0x4:
6079 bp->phy_flags |= PHY_SERDES_FLAG;
6080 return;
6081 }
6082 }
6083}
6084
Michael Chan883e5152007-05-03 13:25:11 -07006085static void __devinit
6086bnx2_get_pci_speed(struct bnx2 *bp)
6087{
6088 u32 reg;
6089
6090 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6091 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6092 u32 clkreg;
6093
6094 bp->flags |= PCIX_FLAG;
6095
6096 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6097
6098 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6099 switch (clkreg) {
6100 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6101 bp->bus_speed_mhz = 133;
6102 break;
6103
6104 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6105 bp->bus_speed_mhz = 100;
6106 break;
6107
6108 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6109 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6110 bp->bus_speed_mhz = 66;
6111 break;
6112
6113 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6114 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6115 bp->bus_speed_mhz = 50;
6116 break;
6117
6118 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6119 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6120 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6121 bp->bus_speed_mhz = 33;
6122 break;
6123 }
6124 }
6125 else {
6126 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6127 bp->bus_speed_mhz = 66;
6128 else
6129 bp->bus_speed_mhz = 33;
6130 }
6131
6132 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6133 bp->flags |= PCI_32BIT_FLAG;
6134
6135}
6136
Michael Chanb6016b72005-05-26 13:03:09 -07006137static int __devinit
6138bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6139{
6140 struct bnx2 *bp;
6141 unsigned long mem_len;
6142 int rc;
6143 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006144 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006145
6146 SET_MODULE_OWNER(dev);
6147 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006148 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006149
6150 bp->flags = 0;
6151 bp->phy_flags = 0;
6152
6153 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6154 rc = pci_enable_device(pdev);
6155 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006156 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07006157 goto err_out;
6158 }
6159
6160 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006161 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006162 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006163 rc = -ENODEV;
6164 goto err_out_disable;
6165 }
6166
6167 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6168 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006169 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006170 goto err_out_disable;
6171 }
6172
6173 pci_set_master(pdev);
6174
6175 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6176 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006177 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006178 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006179 rc = -EIO;
6180 goto err_out_release;
6181 }
6182
Michael Chanb6016b72005-05-26 13:03:09 -07006183 bp->dev = dev;
6184 bp->pdev = pdev;
6185
6186 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006187 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006188 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006189
6190 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006191 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006192 dev->mem_end = dev->mem_start + mem_len;
6193 dev->irq = pdev->irq;
6194
6195 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6196
6197 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006198 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006199 rc = -ENOMEM;
6200 goto err_out_release;
6201 }
6202
6203 /* Configure byte swap and enable write to the reg_window registers.
6204 * Rely on CPU to do target byte swapping on big endian systems
6205 * The chip's target access swapping will not swap all accesses
6206 */
6207 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6208 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6209 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6210
Pavel Machek829ca9a2005-09-03 15:56:56 -07006211 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006212
6213 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6214
Michael Chan883e5152007-05-03 13:25:11 -07006215 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6216 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6217 dev_err(&pdev->dev,
6218 "Cannot find PCIE capability, aborting.\n");
6219 rc = -EIO;
6220 goto err_out_unmap;
6221 }
6222 bp->flags |= PCIE_FLAG;
6223 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006224 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6225 if (bp->pcix_cap == 0) {
6226 dev_err(&pdev->dev,
6227 "Cannot find PCIX capability, aborting.\n");
6228 rc = -EIO;
6229 goto err_out_unmap;
6230 }
6231 }
6232
Michael Chan8e6a72c2007-05-03 13:24:48 -07006233 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6234 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6235 bp->flags |= MSI_CAP_FLAG;
6236 }
6237
Michael Chan40453c82007-05-03 13:19:18 -07006238 /* 5708 cannot support DMA addresses > 40-bit. */
6239 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6240 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6241 else
6242 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6243
6244 /* Configure DMA attributes. */
6245 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6246 dev->features |= NETIF_F_HIGHDMA;
6247 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6248 if (rc) {
6249 dev_err(&pdev->dev,
6250 "pci_set_consistent_dma_mask failed, aborting.\n");
6251 goto err_out_unmap;
6252 }
6253 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6254 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6255 goto err_out_unmap;
6256 }
6257
Michael Chan883e5152007-05-03 13:25:11 -07006258 if (!(bp->flags & PCIE_FLAG))
6259 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006260
6261 /* 5706A0 may falsely detect SERR and PERR. */
6262 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6263 reg = REG_RD(bp, PCI_COMMAND);
6264 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6265 REG_WR(bp, PCI_COMMAND, reg);
6266 }
6267 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6268 !(bp->flags & PCIX_FLAG)) {
6269
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006270 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006271 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006272 goto err_out_unmap;
6273 }
6274
6275 bnx2_init_nvram(bp);
6276
Michael Chane3648b32005-11-04 08:51:21 -08006277 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6278
6279 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006280 BNX2_SHM_HDR_SIGNATURE_SIG) {
6281 u32 off = PCI_FUNC(pdev->devfn) << 2;
6282
6283 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6284 } else
Michael Chane3648b32005-11-04 08:51:21 -08006285 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6286
Michael Chanb6016b72005-05-26 13:03:09 -07006287 /* Get the permanent MAC address. First we need to make sure the
6288 * firmware is actually running.
6289 */
Michael Chane3648b32005-11-04 08:51:21 -08006290 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006291
6292 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6293 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006294 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006295 rc = -ENODEV;
6296 goto err_out_unmap;
6297 }
6298
Michael Chane3648b32005-11-04 08:51:21 -08006299 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006300
Michael Chane3648b32005-11-04 08:51:21 -08006301 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006302 bp->mac_addr[0] = (u8) (reg >> 8);
6303 bp->mac_addr[1] = (u8) reg;
6304
Michael Chane3648b32005-11-04 08:51:21 -08006305 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006306 bp->mac_addr[2] = (u8) (reg >> 24);
6307 bp->mac_addr[3] = (u8) (reg >> 16);
6308 bp->mac_addr[4] = (u8) (reg >> 8);
6309 bp->mac_addr[5] = (u8) reg;
6310
6311 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006312 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006313
6314 bp->rx_csum = 1;
6315
6316 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6317
6318 bp->tx_quick_cons_trip_int = 20;
6319 bp->tx_quick_cons_trip = 20;
6320 bp->tx_ticks_int = 80;
6321 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006322
Michael Chanb6016b72005-05-26 13:03:09 -07006323 bp->rx_quick_cons_trip_int = 6;
6324 bp->rx_quick_cons_trip = 6;
6325 bp->rx_ticks_int = 18;
6326 bp->rx_ticks = 18;
6327
6328 bp->stats_ticks = 1000000 & 0xffff00;
6329
6330 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006331 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006332
Michael Chan5b0c76a2005-11-04 08:45:49 -08006333 bp->phy_addr = 1;
6334
Michael Chanb6016b72005-05-26 13:03:09 -07006335 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006336 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6337 bnx2_get_5709_media(bp);
6338 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006339 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006340
6341 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006342 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006343 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006344 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006345 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006346 BNX2_SHARED_HW_CFG_CONFIG);
6347 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6348 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6349 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006350 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6351 CHIP_NUM(bp) == CHIP_NUM_5708)
6352 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006353 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6354 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006355
Michael Chan16088272006-06-12 22:16:43 -07006356 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6357 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6358 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006359 bp->flags |= NO_WOL_FLAG;
6360
Michael Chanb6016b72005-05-26 13:03:09 -07006361 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6362 bp->tx_quick_cons_trip_int =
6363 bp->tx_quick_cons_trip;
6364 bp->tx_ticks_int = bp->tx_ticks;
6365 bp->rx_quick_cons_trip_int =
6366 bp->rx_quick_cons_trip;
6367 bp->rx_ticks_int = bp->rx_ticks;
6368 bp->comp_prod_trip_int = bp->comp_prod_trip;
6369 bp->com_ticks_int = bp->com_ticks;
6370 bp->cmd_ticks_int = bp->cmd_ticks;
6371 }
6372
Michael Chanf9317a42006-09-29 17:06:23 -07006373 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6374 *
6375 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6376 * with byte enables disabled on the unused 32-bit word. This is legal
6377 * but causes problems on the AMD 8132 which will eventually stop
6378 * responding after a while.
6379 *
6380 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006381 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006382 */
6383 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6384 struct pci_dev *amd_8132 = NULL;
6385
6386 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6387 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6388 amd_8132))) {
6389 u8 rev;
6390
6391 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6392 if (rev >= 0x10 && rev <= 0x13) {
6393 disable_msi = 1;
6394 pci_dev_put(amd_8132);
6395 break;
6396 }
6397 }
6398 }
6399
Michael Chandeaf3912007-07-07 22:48:00 -07006400 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006401 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6402
Michael Chancd339a02005-08-25 15:35:24 -07006403 init_timer(&bp->timer);
6404 bp->timer.expires = RUN_AT(bp->timer_interval);
6405 bp->timer.data = (unsigned long) bp;
6406 bp->timer.function = bnx2_timer;
6407
Michael Chanb6016b72005-05-26 13:03:09 -07006408 return 0;
6409
6410err_out_unmap:
6411 if (bp->regview) {
6412 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006413 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006414 }
6415
6416err_out_release:
6417 pci_release_regions(pdev);
6418
6419err_out_disable:
6420 pci_disable_device(pdev);
6421 pci_set_drvdata(pdev, NULL);
6422
6423err_out:
6424 return rc;
6425}
6426
Michael Chan883e5152007-05-03 13:25:11 -07006427static char * __devinit
6428bnx2_bus_string(struct bnx2 *bp, char *str)
6429{
6430 char *s = str;
6431
6432 if (bp->flags & PCIE_FLAG) {
6433 s += sprintf(s, "PCI Express");
6434 } else {
6435 s += sprintf(s, "PCI");
6436 if (bp->flags & PCIX_FLAG)
6437 s += sprintf(s, "-X");
6438 if (bp->flags & PCI_32BIT_FLAG)
6439 s += sprintf(s, " 32-bit");
6440 else
6441 s += sprintf(s, " 64-bit");
6442 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6443 }
6444 return str;
6445}
6446
Michael Chanb6016b72005-05-26 13:03:09 -07006447static int __devinit
6448bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6449{
6450 static int version_printed = 0;
6451 struct net_device *dev = NULL;
6452 struct bnx2 *bp;
6453 int rc, i;
Michael Chan883e5152007-05-03 13:25:11 -07006454 char str[40];
Michael Chanb6016b72005-05-26 13:03:09 -07006455
6456 if (version_printed++ == 0)
6457 printk(KERN_INFO "%s", version);
6458
6459 /* dev zeroed in init_etherdev */
6460 dev = alloc_etherdev(sizeof(*bp));
6461
6462 if (!dev)
6463 return -ENOMEM;
6464
6465 rc = bnx2_init_board(pdev, dev);
6466 if (rc < 0) {
6467 free_netdev(dev);
6468 return rc;
6469 }
6470
6471 dev->open = bnx2_open;
6472 dev->hard_start_xmit = bnx2_start_xmit;
6473 dev->stop = bnx2_close;
6474 dev->get_stats = bnx2_get_stats;
6475 dev->set_multicast_list = bnx2_set_rx_mode;
6476 dev->do_ioctl = bnx2_ioctl;
6477 dev->set_mac_address = bnx2_change_mac_addr;
6478 dev->change_mtu = bnx2_change_mtu;
6479 dev->tx_timeout = bnx2_tx_timeout;
6480 dev->watchdog_timeo = TX_TIMEOUT;
6481#ifdef BCM_VLAN
6482 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07006483#endif
6484 dev->poll = bnx2_poll;
6485 dev->ethtool_ops = &bnx2_ethtool_ops;
6486 dev->weight = 64;
6487
Michael Chan972ec0d2006-01-23 16:12:43 -08006488 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006489
6490#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6491 dev->poll_controller = poll_bnx2;
6492#endif
6493
Michael Chan1b2f9222007-05-03 13:20:19 -07006494 pci_set_drvdata(pdev, dev);
6495
6496 memcpy(dev->dev_addr, bp->mac_addr, 6);
6497 memcpy(dev->perm_addr, bp->mac_addr, 6);
6498 bp->name = board_info[ent->driver_data].name;
6499
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006500 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07006501 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006502 dev->features |= NETIF_F_IPV6_CSUM;
6503
Michael Chan1b2f9222007-05-03 13:20:19 -07006504#ifdef BCM_VLAN
6505 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6506#endif
6507 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006508 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6509 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006510
Michael Chanb6016b72005-05-26 13:03:09 -07006511 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006512 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006513 if (bp->regview)
6514 iounmap(bp->regview);
6515 pci_release_regions(pdev);
6516 pci_disable_device(pdev);
6517 pci_set_drvdata(pdev, NULL);
6518 free_netdev(dev);
6519 return rc;
6520 }
6521
Michael Chan883e5152007-05-03 13:25:11 -07006522 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Michael Chanb6016b72005-05-26 13:03:09 -07006523 "IRQ %d, ",
6524 dev->name,
6525 bp->name,
6526 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6527 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07006528 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07006529 dev->base_addr,
6530 bp->pdev->irq);
6531
6532 printk("node addr ");
6533 for (i = 0; i < 6; i++)
6534 printk("%2.2x", dev->dev_addr[i]);
6535 printk("\n");
6536
Michael Chanb6016b72005-05-26 13:03:09 -07006537 return 0;
6538}
6539
6540static void __devexit
6541bnx2_remove_one(struct pci_dev *pdev)
6542{
6543 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006544 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006545
Michael Chanafdc08b2005-08-25 15:34:29 -07006546 flush_scheduled_work();
6547
Michael Chanb6016b72005-05-26 13:03:09 -07006548 unregister_netdev(dev);
6549
6550 if (bp->regview)
6551 iounmap(bp->regview);
6552
6553 free_netdev(dev);
6554 pci_release_regions(pdev);
6555 pci_disable_device(pdev);
6556 pci_set_drvdata(pdev, NULL);
6557}
6558
6559static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006560bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006561{
6562 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006563 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006564 u32 reset_code;
6565
6566 if (!netif_running(dev))
6567 return 0;
6568
Michael Chan1d60290f2006-03-20 17:50:08 -08006569 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006570 bnx2_netif_stop(bp);
6571 netif_device_detach(dev);
6572 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006573 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006574 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006575 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006576 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6577 else
6578 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6579 bnx2_reset_chip(bp, reset_code);
6580 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006581 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006582 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006583 return 0;
6584}
6585
6586static int
6587bnx2_resume(struct pci_dev *pdev)
6588{
6589 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006590 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006591
6592 if (!netif_running(dev))
6593 return 0;
6594
Michael Chan30c517b2007-05-03 13:20:40 -07006595 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006596 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006597 netif_device_attach(dev);
6598 bnx2_init_nic(bp);
6599 bnx2_netif_start(bp);
6600 return 0;
6601}
6602
6603static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006604 .name = DRV_MODULE_NAME,
6605 .id_table = bnx2_pci_tbl,
6606 .probe = bnx2_init_one,
6607 .remove = __devexit_p(bnx2_remove_one),
6608 .suspend = bnx2_suspend,
6609 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006610};
6611
6612static int __init bnx2_init(void)
6613{
Jeff Garzik29917622006-08-19 17:48:59 -04006614 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006615}
6616
6617static void __exit bnx2_cleanup(void)
6618{
6619 pci_unregister_driver(&bnx2_pci_driver);
6620}
6621
6622module_init(bnx2_init);
6623module_exit(bnx2_cleanup);
6624
6625
6626