/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.11"
#define DRV_MODULE_RELDATE	"June 4, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

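/* Report the current link speed, duplex and autoneg state to the on-chip
 * firmware through the shared memory link status field.
 */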
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

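/* Read the PHY status, update link state, speed, duplex and flow control,
 * then reprogram the MAC to match.
 */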
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

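/* With a remote PHY (one managed by the firmware), link settings are not
 * programmed over MDIO; they are passed to the firmware through shared
 * memory followed by a SET_LINK command.
 */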
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1391 */
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan605a9e22007-05-03 13:23:13 -07001395 } else {
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001398 }
1399
1400 return 0;
1401}
1402
1403#define ETHTOOL_ALL_FIBRE_SPEED \
Michael Chandeaf3912007-07-07 22:48:00 -07001404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07001407
1408#define ETHTOOL_ALL_COPPER_SPEED \
1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1410 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1411 ADVERTISED_1000baseT_Full)
1412
1413#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001415
Michael Chanb6016b72005-05-26 13:03:09 -07001416#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		break;
	}
	return 0;
}

Michael Chanb6016b72005-05-26 13:03:09 -07001580static int
1581bnx2_setup_copper_phy(struct bnx2 *bp)
1582{
1583 u32 bmcr;
1584 u32 new_bmcr;
1585
Michael Chanca58c3a2007-05-03 13:22:52 -07001586 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001587
1588 if (bp->autoneg & AUTONEG_SPEED) {
1589 u32 adv_reg, adv1000_reg;
1590 u32 new_adv_reg = 0;
1591 u32 new_adv1000_reg = 0;
1592
Michael Chanca58c3a2007-05-03 13:22:52 -07001593 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001594 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1595 ADVERTISE_PAUSE_ASYM);
1596
1597 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1598 adv1000_reg &= PHY_ALL_1000_SPEED;
1599
1600 if (bp->advertising & ADVERTISED_10baseT_Half)
1601 new_adv_reg |= ADVERTISE_10HALF;
1602 if (bp->advertising & ADVERTISED_10baseT_Full)
1603 new_adv_reg |= ADVERTISE_10FULL;
1604 if (bp->advertising & ADVERTISED_100baseT_Half)
1605 new_adv_reg |= ADVERTISE_100HALF;
1606 if (bp->advertising & ADVERTISED_100baseT_Full)
1607 new_adv_reg |= ADVERTISE_100FULL;
1608 if (bp->advertising & ADVERTISED_1000baseT_Full)
1609 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001610
Michael Chanb6016b72005-05-26 13:03:09 -07001611 new_adv_reg |= ADVERTISE_CSMA;
1612
1613 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1614
1615 if ((adv1000_reg != new_adv1000_reg) ||
1616 (adv_reg != new_adv_reg) ||
1617 ((bmcr & BMCR_ANENABLE) == 0)) {
1618
Michael Chanca58c3a2007-05-03 13:22:52 -07001619 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001620 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001621 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001622 BMCR_ANENABLE);
1623 }
1624 else if (bp->link_up) {
1625 /* Flow ctrl may have changed from auto to forced */
1626 /* or vice-versa. */
1627
1628 bnx2_resolve_flow_ctrl(bp);
1629 bnx2_set_mac_link(bp);
1630 }
1631 return 0;
1632 }
1633
1634 new_bmcr = 0;
1635 if (bp->req_line_speed == SPEED_100) {
1636 new_bmcr |= BMCR_SPEED100;
1637 }
1638 if (bp->req_duplex == DUPLEX_FULL) {
1639 new_bmcr |= BMCR_FULLDPLX;
1640 }
1641 if (new_bmcr != bmcr) {
1642 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001643
Michael Chanca58c3a2007-05-03 13:22:52 -07001644 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1645 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001646
Michael Chanb6016b72005-05-26 13:03:09 -07001647 if (bmsr & BMSR_LSTATUS) {
1648 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001649 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001650 spin_unlock_bh(&bp->phy_lock);
1651 msleep(50);
1652 spin_lock_bh(&bp->phy_lock);
1653
Michael Chanca58c3a2007-05-03 13:22:52 -07001654 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1655 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001656 }
1657
Michael Chanca58c3a2007-05-03 13:22:52 -07001658 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001659
1660		/* Normally, the new speed is set up after the link has
1661 * gone down and up again. In some cases, link will not go
1662 * down so we need to set up the new speed here.
1663 */
1664 if (bmsr & BMSR_LSTATUS) {
1665 bp->line_speed = bp->req_line_speed;
1666 bp->duplex = bp->req_duplex;
1667 bnx2_resolve_flow_ctrl(bp);
1668 bnx2_set_mac_link(bp);
1669 }
Michael Chan27a005b2007-05-03 13:23:41 -07001670 } else {
1671 bnx2_resolve_flow_ctrl(bp);
1672 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001673 }
1674 return 0;
1675}
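
/* Note on the autonegotiation path in bnx2_setup_copper_phy() above: the
 * 10/100 advertisement word and the 1000BASE-T control word are rebuilt from
 * bp->advertising plus the pause bits, and autonegotiation is restarted only
 * if either word or the BMCR autoneg enable bit actually changed; otherwise
 * just flow control and the MAC link settings are re-resolved.
 */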
1676
1677static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001678bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001679{
1680 if (bp->loopback == MAC_LOOPBACK)
1681 return 0;
1682
1683 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001684 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001685 }
1686 else {
1687 return (bnx2_setup_copper_phy(bp));
1688 }
1689}
1690
1691static int
Michael Chan27a005b2007-05-03 13:23:41 -07001692bnx2_init_5709s_phy(struct bnx2 *bp)
1693{
1694 u32 val;
1695
1696 bp->mii_bmcr = MII_BMCR + 0x10;
1697 bp->mii_bmsr = MII_BMSR + 0x10;
1698 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1699 bp->mii_adv = MII_ADVERTISE + 0x10;
1700 bp->mii_lpa = MII_LPA + 0x10;
1701 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1702
1703 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1704 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1705
1706 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1707 bnx2_reset_phy(bp);
1708
1709 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1710
1711 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1712 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1713 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1714 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1715
1716 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1717 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1718 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1719 val |= BCM5708S_UP1_2G5;
1720 else
1721 val &= ~BCM5708S_UP1_2G5;
1722 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1723
1724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1725 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1726 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1727 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1728
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1730
1731 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1732 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1733 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1734
1735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1736
1737 return 0;
1738}
1739
1740static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001741bnx2_init_5708s_phy(struct bnx2 *bp)
1742{
1743 u32 val;
1744
Michael Chan27a005b2007-05-03 13:23:41 -07001745 bnx2_reset_phy(bp);
1746
1747 bp->mii_up1 = BCM5708S_UP1;
1748
Michael Chan5b0c76a2005-11-04 08:45:49 -08001749 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1750 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1751 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1752
1753 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1754 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1755 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1756
1757 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1758 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1759 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1760
1761 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1762 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1763 val |= BCM5708S_UP1_2G5;
1764 bnx2_write_phy(bp, BCM5708S_UP1, val);
1765 }
1766
1767 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001768 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1769 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001770 /* increase tx signal amplitude */
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1772 BCM5708S_BLK_ADDR_TX_MISC);
1773 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1774 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1775 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1777 }
1778
Michael Chane3648b32005-11-04 08:51:21 -08001779 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001780 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1781
1782 if (val) {
1783 u32 is_backplane;
1784
Michael Chane3648b32005-11-04 08:51:21 -08001785 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001786 BNX2_SHARED_HW_CFG_CONFIG);
1787 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1788 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1789 BCM5708S_BLK_ADDR_TX_MISC);
1790 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_DIG);
1793 }
1794 }
1795 return 0;
1796}
1797
1798static int
1799bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001800{
Michael Chan27a005b2007-05-03 13:23:41 -07001801 bnx2_reset_phy(bp);
1802
Michael Chanb6016b72005-05-26 13:03:09 -07001803 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1804
Michael Chan59b47d82006-11-19 14:10:45 -08001805 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1806 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001807
1808 if (bp->dev->mtu > 1500) {
1809 u32 val;
1810
1811 /* Set extended packet length bit */
1812 bnx2_write_phy(bp, 0x18, 0x7);
1813 bnx2_read_phy(bp, 0x18, &val);
1814 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1815
1816 bnx2_write_phy(bp, 0x1c, 0x6c00);
1817 bnx2_read_phy(bp, 0x1c, &val);
1818 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1819 }
1820 else {
1821 u32 val;
1822
1823 bnx2_write_phy(bp, 0x18, 0x7);
1824 bnx2_read_phy(bp, 0x18, &val);
1825 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1826
1827 bnx2_write_phy(bp, 0x1c, 0x6c00);
1828 bnx2_read_phy(bp, 0x1c, &val);
1829 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1830 }
1831
1832 return 0;
1833}
1834
1835static int
1836bnx2_init_copper_phy(struct bnx2 *bp)
1837{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001838 u32 val;
1839
Michael Chan27a005b2007-05-03 13:23:41 -07001840 bnx2_reset_phy(bp);
1841
Michael Chanb6016b72005-05-26 13:03:09 -07001842 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1843 bnx2_write_phy(bp, 0x18, 0x0c00);
1844 bnx2_write_phy(bp, 0x17, 0x000a);
1845 bnx2_write_phy(bp, 0x15, 0x310b);
1846 bnx2_write_phy(bp, 0x17, 0x201f);
1847 bnx2_write_phy(bp, 0x15, 0x9506);
1848 bnx2_write_phy(bp, 0x17, 0x401f);
1849 bnx2_write_phy(bp, 0x15, 0x14e2);
1850 bnx2_write_phy(bp, 0x18, 0x0400);
1851 }
1852
Michael Chanb659f442007-02-02 00:46:35 -08001853 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1854 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1855 MII_BNX2_DSP_EXPAND_REG | 0x8);
1856 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1857 val &= ~(1 << 8);
1858 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1859 }
1860
Michael Chanb6016b72005-05-26 13:03:09 -07001861 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001862 /* Set extended packet length bit */
1863 bnx2_write_phy(bp, 0x18, 0x7);
1864 bnx2_read_phy(bp, 0x18, &val);
1865 bnx2_write_phy(bp, 0x18, val | 0x4000);
1866
1867 bnx2_read_phy(bp, 0x10, &val);
1868 bnx2_write_phy(bp, 0x10, val | 0x1);
1869 }
1870 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001871 bnx2_write_phy(bp, 0x18, 0x7);
1872 bnx2_read_phy(bp, 0x18, &val);
1873 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1874
1875 bnx2_read_phy(bp, 0x10, &val);
1876 bnx2_write_phy(bp, 0x10, val & ~0x1);
1877 }
1878
Michael Chan5b0c76a2005-11-04 08:45:49 -08001879 /* ethernet@wirespeed */
1880 bnx2_write_phy(bp, 0x18, 0x7007);
1881 bnx2_read_phy(bp, 0x18, &val);
1882 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001883 return 0;
1884}
1885
1886
1887static int
1888bnx2_init_phy(struct bnx2 *bp)
1889{
1890 u32 val;
1891 int rc = 0;
1892
1893 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1894 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1895
Michael Chanca58c3a2007-05-03 13:22:52 -07001896 bp->mii_bmcr = MII_BMCR;
1897 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001898 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001899 bp->mii_adv = MII_ADVERTISE;
1900 bp->mii_lpa = MII_LPA;
1901
Michael Chanb6016b72005-05-26 13:03:09 -07001902 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1903
Michael Chan0d8a6572007-07-07 22:49:43 -07001904 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1905 goto setup_phy;
1906
Michael Chanb6016b72005-05-26 13:03:09 -07001907 bnx2_read_phy(bp, MII_PHYSID1, &val);
1908 bp->phy_id = val << 16;
1909 bnx2_read_phy(bp, MII_PHYSID2, &val);
1910 bp->phy_id |= val & 0xffff;
1911
1912 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001913 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1914 rc = bnx2_init_5706s_phy(bp);
1915 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1916 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001917 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1918 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001919 }
1920 else {
1921 rc = bnx2_init_copper_phy(bp);
1922 }
1923
Michael Chan0d8a6572007-07-07 22:49:43 -07001924setup_phy:
1925 if (!rc)
1926 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001927
1928 return rc;
1929}
1930
1931static int
1932bnx2_set_mac_loopback(struct bnx2 *bp)
1933{
1934 u32 mac_mode;
1935
1936 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1940 bp->link_up = 1;
1941 return 0;
1942}
1943
Michael Chanbc5a0692006-01-23 16:13:22 -08001944static int bnx2_test_link(struct bnx2 *);
1945
1946static int
1947bnx2_set_phy_loopback(struct bnx2 *bp)
1948{
1949 u32 mac_mode;
1950 int rc, i;
1951
1952 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001953 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001954 BMCR_SPEED1000);
1955 spin_unlock_bh(&bp->phy_lock);
1956 if (rc)
1957 return rc;
1958
1959 for (i = 0; i < 10; i++) {
1960 if (bnx2_test_link(bp) == 0)
1961 break;
Michael Chan80be4432006-11-19 14:07:28 -08001962 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001963 }
1964
1965 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1966 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1967 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001968 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001969
1970 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1971 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1972 bp->link_up = 1;
1973 return 0;
1974}
1975
Michael Chanb6016b72005-05-26 13:03:09 -07001976static int
Michael Chanb090ae22006-01-23 16:07:10 -08001977bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001978{
1979 int i;
1980 u32 val;
1981
Michael Chanb6016b72005-05-26 13:03:09 -07001982 bp->fw_wr_seq++;
1983 msg_data |= bp->fw_wr_seq;
1984
Michael Chane3648b32005-11-04 08:51:21 -08001985 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001986
1987 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001988 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1989 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001990
Michael Chane3648b32005-11-04 08:51:21 -08001991 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001992
1993 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1994 break;
1995 }
Michael Chanb090ae22006-01-23 16:07:10 -08001996 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1997 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001998
1999 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08002000 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2001 if (!silent)
2002 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2003 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002004
2005 msg_data &= ~BNX2_DRV_MSG_CODE;
2006 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2007
Michael Chane3648b32005-11-04 08:51:21 -08002008 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07002009
Michael Chanb6016b72005-05-26 13:03:09 -07002010 return -EBUSY;
2011 }
2012
Michael Chanb090ae22006-01-23 16:07:10 -08002013 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2014 return -EIO;
2015
Michael Chanb6016b72005-05-26 13:03:09 -07002016 return 0;
2017}
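
/* Driver/firmware handshake used by bnx2_fw_sync() above: each request
 * carries an incrementing sequence number in the low bits of the word
 * written to the BNX2_DRV_MB shared-memory mailbox.  The firmware echoes
 * that sequence in BNX2_FW_MB once it has processed the request, so the
 * loop polls in 10 ms steps until the acknowledged sequence matches.  On a
 * timeout the driver posts BNX2_DRV_MSG_CODE_FW_TIMEOUT so the firmware
 * knows the command was abandoned.
 */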
2018
Michael Chan59b47d82006-11-19 14:10:45 -08002019static int
2020bnx2_init_5709_context(struct bnx2 *bp)
2021{
2022 int i, ret = 0;
2023 u32 val;
2024
2025 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2026 val |= (BCM_PAGE_BITS - 8) << 16;
2027 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002028 for (i = 0; i < 10; i++) {
2029 val = REG_RD(bp, BNX2_CTX_COMMAND);
2030 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2031 break;
2032 udelay(2);
2033 }
2034 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2035 return -EBUSY;
2036
Michael Chan59b47d82006-11-19 14:10:45 -08002037 for (i = 0; i < bp->ctx_pages; i++) {
2038 int j;
2039
2040 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2041 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2042 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2043 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2044 (u64) bp->ctx_blk_mapping[i] >> 32);
2045 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2046 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2047 for (j = 0; j < 10; j++) {
2048
2049 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2050 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2051 break;
2052 udelay(5);
2053 }
2054 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2055 ret = -EBUSY;
2056 break;
2057 }
2058 }
2059 return ret;
2060}
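
/* bnx2_init_5709_context() above first triggers the context memory init and
 * waits for BNX2_CTX_COMMAND_MEM_INIT to clear, then programs the host page
 * table one context page at a time: the low and high halves of the DMA
 * address go into the two HOST_PAGE_TBL_DATA registers and the entry is
 * committed by setting WRITE_REQ in HOST_PAGE_TBL_CTRL, which is polled
 * until the hardware clears it.
 */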
2061
Michael Chanb6016b72005-05-26 13:03:09 -07002062static void
2063bnx2_init_context(struct bnx2 *bp)
2064{
2065 u32 vcid;
2066
2067 vcid = 96;
2068 while (vcid) {
2069 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002070 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002071
2072 vcid--;
2073
2074 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2075 u32 new_vcid;
2076
2077 vcid_addr = GET_PCID_ADDR(vcid);
2078 if (vcid & 0x8) {
2079 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2080 }
2081 else {
2082 new_vcid = vcid;
2083 }
2084 pcid_addr = GET_PCID_ADDR(new_vcid);
2085 }
2086 else {
2087 vcid_addr = GET_CID_ADDR(vcid);
2088 pcid_addr = vcid_addr;
2089 }
2090
Michael Chan7947b202007-06-04 21:17:10 -07002091 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2092 vcid_addr += (i << PHY_CTX_SHIFT);
2093 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002094
Michael Chan7947b202007-06-04 21:17:10 -07002095 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2096 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2097
2098 /* Zero out the context. */
2099 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2100 CTX_WR(bp, 0x00, offset, 0);
2101
2102 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2103 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
Michael Chanb6016b72005-05-26 13:03:09 -07002104 }
Michael Chanb6016b72005-05-26 13:03:09 -07002105 }
2106}
2107
2108static int
2109bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2110{
2111 u16 *good_mbuf;
2112 u32 good_mbuf_cnt;
2113 u32 val;
2114
2115 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2116 if (good_mbuf == NULL) {
2117 printk(KERN_ERR PFX "Failed to allocate memory in "
2118 "bnx2_alloc_bad_rbuf\n");
2119 return -ENOMEM;
2120 }
2121
2122 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2123 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2124
2125 good_mbuf_cnt = 0;
2126
2127 /* Allocate a bunch of mbufs and save the good ones in an array. */
2128 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2129 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2130 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2131
2132 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2133
2134 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2135
2136 /* The addresses with Bit 9 set are bad memory blocks. */
2137 if (!(val & (1 << 9))) {
2138 good_mbuf[good_mbuf_cnt] = (u16) val;
2139 good_mbuf_cnt++;
2140 }
2141
2142 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2143 }
2144
2145 /* Free the good ones back to the mbuf pool thus discarding
2146 * all the bad ones. */
2147 while (good_mbuf_cnt) {
2148 good_mbuf_cnt--;
2149
2150 val = good_mbuf[good_mbuf_cnt];
2151 val = (val << 9) | val | 1;
2152
2153 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2154 }
2155 kfree(good_mbuf);
2156 return 0;
2157}
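
/* bnx2_alloc_bad_rbuf() above works around bad RX buffer memory: it pulls
 * every free mbuf cluster from the RX buffer allocator, keeps only the ones
 * whose returned value does not have bit 9 set (bit 9 marks a bad block),
 * and then frees just the good ones back, leaving the bad clusters out of
 * the free pool.  The value written to BNX2_RBUF_FW_BUF_FREE is the cluster
 * value repeated at bit position 9 with bit 0 set, as the
 * (val << 9) | val | 1 encoding above shows.
 */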
2158
2159static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002160bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002161{
2162 u32 val;
2163 u8 *mac_addr = bp->dev->dev_addr;
2164
2165 val = (mac_addr[0] << 8) | mac_addr[1];
2166
2167 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2168
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002169 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002170 (mac_addr[4] << 8) | mac_addr[5];
2171
2172 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2173}
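
/* Illustrative sketch (not part of the driver): the register packing done by
 * bnx2_set_mac_addr() above, shown as a small standalone C program.  The MAC
 * address used here is made up purely for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 };	/* hypothetical */
	uint32_t match0, match1;

	match0 = (mac[0] << 8) | mac[1];			/* first two bytes */
	match1 = ((uint32_t) mac[2] << 24) | (mac[3] << 16) |
		 (mac[4] << 8) | mac[5];			/* remaining four bytes */

	printf("EMAC_MAC_MATCH0 = 0x%08x\n", (unsigned) match0);	/* 0x00000010 */
	printf("EMAC_MAC_MATCH1 = 0x%08x\n", (unsigned) match1);	/* 0x18010203 */
	return 0;
}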
2174
2175static inline int
2176bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2177{
2178 struct sk_buff *skb;
2179 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2180 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002181 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002182 unsigned long align;
2183
Michael Chan932f3772006-08-15 01:39:36 -07002184 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002185 if (skb == NULL) {
2186 return -ENOMEM;
2187 }
2188
Michael Chan59b47d82006-11-19 14:10:45 -08002189 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2190 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002191
Michael Chanb6016b72005-05-26 13:03:09 -07002192 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2193 PCI_DMA_FROMDEVICE);
2194
2195 rx_buf->skb = skb;
2196 pci_unmap_addr_set(rx_buf, mapping, mapping);
2197
2198 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2199 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2200
2201 bp->rx_prod_bseq += bp->rx_buf_use_size;
2202
2203 return 0;
2204}
2205
Michael Chanda3e4fb2007-05-03 13:24:23 -07002206static int
2207bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2208{
2209 struct status_block *sblk = bp->status_blk;
2210 u32 new_link_state, old_link_state;
2211 int is_set = 1;
2212
2213 new_link_state = sblk->status_attn_bits & event;
2214 old_link_state = sblk->status_attn_bits_ack & event;
2215 if (new_link_state != old_link_state) {
2216 if (new_link_state)
2217 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2218 else
2219 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2220 } else
2221 is_set = 0;
2222
2223 return is_set;
2224}
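
/* bnx2_phy_event_is_set() above treats an event as pending when its bit in
 * status_attn_bits differs from the corresponding bit in
 * status_attn_bits_ack.  Writing the event bit to STATUS_BIT_SET_CMD or
 * STATUS_BIT_CLEAR_CMD updates the acknowledgement side to match, re-arming
 * the comparison that bnx2_has_work() and bnx2_poll() also rely on.
 */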
2225
Michael Chanb6016b72005-05-26 13:03:09 -07002226static void
2227bnx2_phy_int(struct bnx2 *bp)
2228{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002229 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2230 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002231 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002232 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002233 }
Michael Chan0d8a6572007-07-07 22:49:43 -07002234 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2235 bnx2_set_remote_link(bp);
2236
Michael Chanb6016b72005-05-26 13:03:09 -07002237}
2238
2239static void
2240bnx2_tx_int(struct bnx2 *bp)
2241{
Michael Chanf4e418f2005-11-04 08:53:48 -08002242 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002243 u16 hw_cons, sw_cons, sw_ring_cons;
2244 int tx_free_bd = 0;
2245
Michael Chanf4e418f2005-11-04 08:53:48 -08002246 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002247 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2248 hw_cons++;
2249 }
2250 sw_cons = bp->tx_cons;
2251
2252 while (sw_cons != hw_cons) {
2253 struct sw_bd *tx_buf;
2254 struct sk_buff *skb;
2255 int i, last;
2256
2257 sw_ring_cons = TX_RING_IDX(sw_cons);
2258
2259 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2260 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002261
Michael Chanb6016b72005-05-26 13:03:09 -07002262 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002263 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002264 u16 last_idx, last_ring_idx;
2265
2266 last_idx = sw_cons +
2267 skb_shinfo(skb)->nr_frags + 1;
2268 last_ring_idx = sw_ring_cons +
2269 skb_shinfo(skb)->nr_frags + 1;
2270 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2271 last_idx++;
2272 }
2273 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2274 break;
2275 }
2276 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002277
Michael Chanb6016b72005-05-26 13:03:09 -07002278 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2279 skb_headlen(skb), PCI_DMA_TODEVICE);
2280
2281 tx_buf->skb = NULL;
2282 last = skb_shinfo(skb)->nr_frags;
2283
2284 for (i = 0; i < last; i++) {
2285 sw_cons = NEXT_TX_BD(sw_cons);
2286
2287 pci_unmap_page(bp->pdev,
2288 pci_unmap_addr(
2289 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2290 mapping),
2291 skb_shinfo(skb)->frags[i].size,
2292 PCI_DMA_TODEVICE);
2293 }
2294
2295 sw_cons = NEXT_TX_BD(sw_cons);
2296
2297 tx_free_bd += last + 1;
2298
Michael Chan745720e2006-06-29 12:37:41 -07002299 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002300
Michael Chanf4e418f2005-11-04 08:53:48 -08002301 hw_cons = bp->hw_tx_cons =
2302 sblk->status_tx_quick_consumer_index0;
2303
Michael Chanb6016b72005-05-26 13:03:09 -07002304 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2305 hw_cons++;
2306 }
2307 }
2308
Michael Chane89bbf12005-08-25 15:36:58 -07002309 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002310 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2311 * before checking for netif_queue_stopped(). Without the
2312 * memory barrier, there is a small possibility that bnx2_start_xmit()
2313 * will miss it and cause the queue to be stopped forever.
2314 */
2315 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002316
Michael Chan2f8af122006-08-15 01:39:10 -07002317 if (unlikely(netif_queue_stopped(bp->dev)) &&
2318 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2319 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002320 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002321 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002322 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002323 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002324 }
Michael Chanb6016b72005-05-26 13:03:09 -07002325}
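
/* Note on the TSO check in bnx2_tx_int() above: the hardware may report only
 * part of a GSO packet's buffer descriptors as completed, so the loop
 * computes the index of the packet's last BD (bumped by one more when the
 * range wraps past the end of a ring page) and bails out, leaving the
 * remaining BDs for a later pass, if that index is still ahead of hw_cons.
 * The signed 16-bit subtraction handles consumer index wraparound.
 */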
2326
2327static inline void
2328bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2329 u16 cons, u16 prod)
2330{
Michael Chan236b6392006-03-20 17:49:02 -08002331 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2332 struct rx_bd *cons_bd, *prod_bd;
2333
2334 cons_rx_buf = &bp->rx_buf_ring[cons];
2335 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002336
2337 pci_dma_sync_single_for_device(bp->pdev,
2338 pci_unmap_addr(cons_rx_buf, mapping),
2339 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2340
Michael Chan236b6392006-03-20 17:49:02 -08002341 bp->rx_prod_bseq += bp->rx_buf_use_size;
2342
2343 prod_rx_buf->skb = skb;
2344
2345 if (cons == prod)
2346 return;
2347
Michael Chanb6016b72005-05-26 13:03:09 -07002348 pci_unmap_addr_set(prod_rx_buf, mapping,
2349 pci_unmap_addr(cons_rx_buf, mapping));
2350
Michael Chan3fdfcc22006-03-20 17:49:49 -08002351 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2352 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002353 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2354 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002355}
2356
2357static int
2358bnx2_rx_int(struct bnx2 *bp, int budget)
2359{
Michael Chanf4e418f2005-11-04 08:53:48 -08002360 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002361 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2362 struct l2_fhdr *rx_hdr;
2363 int rx_pkt = 0;
2364
Michael Chanf4e418f2005-11-04 08:53:48 -08002365 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002366 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2367 hw_cons++;
2368 }
2369 sw_cons = bp->rx_cons;
2370 sw_prod = bp->rx_prod;
2371
2372 /* Memory barrier necessary as speculative reads of the rx
2373 * buffer can be ahead of the index in the status block
2374 */
2375 rmb();
2376 while (sw_cons != hw_cons) {
2377 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002378 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002379 struct sw_bd *rx_buf;
2380 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002381 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002382
2383 sw_ring_cons = RX_RING_IDX(sw_cons);
2384 sw_ring_prod = RX_RING_IDX(sw_prod);
2385
2386 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2387 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002388
2389 rx_buf->skb = NULL;
2390
2391 dma_addr = pci_unmap_addr(rx_buf, mapping);
2392
2393 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002394 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2395
2396 rx_hdr = (struct l2_fhdr *) skb->data;
2397 len = rx_hdr->l2_fhdr_pkt_len - 4;
2398
Michael Chanade2bfe2006-01-23 16:09:51 -08002399 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002400 (L2_FHDR_ERRORS_BAD_CRC |
2401 L2_FHDR_ERRORS_PHY_DECODE |
2402 L2_FHDR_ERRORS_ALIGNMENT |
2403 L2_FHDR_ERRORS_TOO_SHORT |
2404 L2_FHDR_ERRORS_GIANT_FRAME)) {
2405
2406 goto reuse_rx;
2407 }
2408
2409 /* Since we don't have a jumbo ring, copy small packets
2410 * if mtu > 1500
2411 */
2412 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2413 struct sk_buff *new_skb;
2414
Michael Chan932f3772006-08-15 01:39:36 -07002415 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002416 if (new_skb == NULL)
2417 goto reuse_rx;
2418
2419 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002420 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2421 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002422 skb_reserve(new_skb, 2);
2423 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002424
2425 bnx2_reuse_rx_skb(bp, skb,
2426 sw_ring_cons, sw_ring_prod);
2427
2428 skb = new_skb;
2429 }
2430 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002431 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002432 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2433
2434 skb_reserve(skb, bp->rx_offset);
2435 skb_put(skb, len);
2436 }
2437 else {
2438reuse_rx:
2439 bnx2_reuse_rx_skb(bp, skb,
2440 sw_ring_cons, sw_ring_prod);
2441 goto next_rx;
2442 }
2443
2444 skb->protocol = eth_type_trans(skb, bp->dev);
2445
2446 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002447 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002448
Michael Chan745720e2006-06-29 12:37:41 -07002449 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002450 goto next_rx;
2451
2452 }
2453
Michael Chanb6016b72005-05-26 13:03:09 -07002454 skb->ip_summed = CHECKSUM_NONE;
2455 if (bp->rx_csum &&
2456 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2457 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2458
Michael Chanade2bfe2006-01-23 16:09:51 -08002459 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2460 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002461 skb->ip_summed = CHECKSUM_UNNECESSARY;
2462 }
2463
2464#ifdef BCM_VLAN
2465 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2466 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2467 rx_hdr->l2_fhdr_vlan_tag);
2468 }
2469 else
2470#endif
2471 netif_receive_skb(skb);
2472
2473 bp->dev->last_rx = jiffies;
2474 rx_pkt++;
2475
2476next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002477 sw_cons = NEXT_RX_BD(sw_cons);
2478 sw_prod = NEXT_RX_BD(sw_prod);
2479
2480 if ((rx_pkt == budget))
2481 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002482
2483 /* Refresh hw_cons to see if there is new work */
2484 if (sw_cons == hw_cons) {
2485 hw_cons = bp->hw_rx_cons =
2486 sblk->status_rx_quick_consumer_index0;
2487 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2488 hw_cons++;
2489 rmb();
2490 }
Michael Chanb6016b72005-05-26 13:03:09 -07002491 }
2492 bp->rx_cons = sw_cons;
2493 bp->rx_prod = sw_prod;
2494
2495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2496
2497 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2498
2499 mmiowb();
2500
2501 return rx_pkt;
2502
2503}
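
/* After the receive loop above, the new consumer/producer indices are saved
 * in the driver state and the hardware is told about the replenished buffers
 * by writing the producer index and the running producer byte sequence to
 * the RX L2 context mailbox registers; mmiowb() is the usual barrier that
 * keeps these MMIO writes ordered on architectures that need it.
 */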
2504
2505/* MSI ISR - The only difference between this and the INTx ISR
2506 * is that the MSI interrupt is always serviced.
2507 */
2508static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002509bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002510{
2511 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002512 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002513
Michael Chanc921e4c2005-09-08 13:15:32 -07002514 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002515 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2516 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2517 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2518
2519 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002520 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2521 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002522
Michael Chan73eef4c2005-08-25 15:39:15 -07002523 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002524
Michael Chan73eef4c2005-08-25 15:39:15 -07002525 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002526}
2527
2528static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002529bnx2_msi_1shot(int irq, void *dev_instance)
2530{
2531 struct net_device *dev = dev_instance;
2532 struct bnx2 *bp = netdev_priv(dev);
2533
2534 prefetch(bp->status_blk);
2535
2536 /* Return here if interrupt is disabled. */
2537 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2538 return IRQ_HANDLED;
2539
2540 netif_rx_schedule(dev);
2541
2542 return IRQ_HANDLED;
2543}
2544
2545static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002546bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002547{
2548 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002549 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb8a7ce72007-07-07 22:51:03 -07002550 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002551
2552 /* When using INTx, it is possible for the interrupt to arrive
2553	 * at the CPU before the status block write that was posted prior to
2554	 * the interrupt.  Reading a register will flush the status block.
2555 * When using MSI, the MSI message will always complete after
2556 * the status block write.
2557 */
Michael Chanb8a7ce72007-07-07 22:51:03 -07002558 if ((sblk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002559 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2560 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002561 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002562
2563 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2564 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2565 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2566
Michael Chanb8a7ce72007-07-07 22:51:03 -07002567 /* Read back to deassert IRQ immediately to avoid too many
2568 * spurious interrupts.
2569 */
2570 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2571
Michael Chanb6016b72005-05-26 13:03:09 -07002572 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002573 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2574 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002575
Michael Chanb8a7ce72007-07-07 22:51:03 -07002576 if (netif_rx_schedule_prep(dev)) {
2577 bp->last_status_idx = sblk->status_idx;
2578 __netif_rx_schedule(dev);
2579 }
Michael Chanb6016b72005-05-26 13:03:09 -07002580
Michael Chan73eef4c2005-08-25 15:39:15 -07002581 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002582}
2583
Michael Chan0d8a6572007-07-07 22:49:43 -07002584#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2585 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002586
Michael Chanf4e418f2005-11-04 08:53:48 -08002587static inline int
2588bnx2_has_work(struct bnx2 *bp)
2589{
2590 struct status_block *sblk = bp->status_blk;
2591
2592 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2593 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2594 return 1;
2595
Michael Chanda3e4fb2007-05-03 13:24:23 -07002596 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2597 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002598 return 1;
2599
2600 return 0;
2601}
2602
Michael Chanb6016b72005-05-26 13:03:09 -07002603static int
2604bnx2_poll(struct net_device *dev, int *budget)
2605{
Michael Chan972ec0d2006-01-23 16:12:43 -08002606 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002607 struct status_block *sblk = bp->status_blk;
2608 u32 status_attn_bits = sblk->status_attn_bits;
2609 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002610
Michael Chanda3e4fb2007-05-03 13:24:23 -07002611 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2612 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002613
Michael Chanb6016b72005-05-26 13:03:09 -07002614 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002615
2616 /* This is needed to take care of transient status
2617 * during link changes.
2618 */
2619 REG_WR(bp, BNX2_HC_COMMAND,
2620 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2621 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002622 }
2623
Michael Chanf4e418f2005-11-04 08:53:48 -08002624 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002625 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002626
Michael Chanf4e418f2005-11-04 08:53:48 -08002627 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002628 int orig_budget = *budget;
2629 int work_done;
2630
2631 if (orig_budget > dev->quota)
2632 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002633
Michael Chanb6016b72005-05-26 13:03:09 -07002634 work_done = bnx2_rx_int(bp, orig_budget);
2635 *budget -= work_done;
2636 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002637 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002638
Michael Chanf4e418f2005-11-04 08:53:48 -08002639 bp->last_status_idx = bp->status_blk->status_idx;
2640 rmb();
2641
2642 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002643 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002644 if (likely(bp->flags & USING_MSI_FLAG)) {
2645 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2646 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2647 bp->last_status_idx);
2648 return 0;
2649 }
Michael Chanb6016b72005-05-26 13:03:09 -07002650 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002651 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2652 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2653 bp->last_status_idx);
2654
2655 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2656 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2657 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002658 return 0;
2659 }
2660
2661 return 1;
2662}
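
/* bnx2_poll() above is the NAPI handler: link/attention events are serviced
 * first (with a COAL_NOW_WO_INT kick so transient link state is not missed),
 * TX completions are reaped and RX packets are processed within the
 * remaining quota, and only when bnx2_has_work() reports nothing pending is
 * the device removed from the poll list and its interrupt re-enabled through
 * the INT_ACK_CMD writes (a single write for MSI, a mask-then-unmask pair
 * for INTx).
 */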
2663
Herbert Xu932ff272006-06-09 12:20:56 -07002664/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002665 * from set_multicast.
2666 */
2667static void
2668bnx2_set_rx_mode(struct net_device *dev)
2669{
Michael Chan972ec0d2006-01-23 16:12:43 -08002670 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002671 u32 rx_mode, sort_mode;
2672 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002673
Michael Chanc770a652005-08-25 15:38:39 -07002674 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002675
2676 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2677 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2678 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2679#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002680 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002681 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002682#else
Michael Chane29054f2006-01-23 16:06:06 -08002683 if (!(bp->flags & ASF_ENABLE_FLAG))
2684 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002685#endif
2686 if (dev->flags & IFF_PROMISC) {
2687 /* Promiscuous mode. */
2688 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002689 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2690 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002691 }
2692 else if (dev->flags & IFF_ALLMULTI) {
2693 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2694 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2695 0xffffffff);
2696 }
2697 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2698 }
2699 else {
2700 /* Accept one or more multicast(s). */
2701 struct dev_mc_list *mclist;
2702 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2703 u32 regidx;
2704 u32 bit;
2705 u32 crc;
2706
2707 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2708
2709 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2710 i++, mclist = mclist->next) {
2711
2712 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2713 bit = crc & 0xff;
2714 regidx = (bit & 0xe0) >> 5;
2715 bit &= 0x1f;
2716 mc_filter[regidx] |= (1 << bit);
2717 }
2718
2719 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2720 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2721 mc_filter[i]);
2722 }
2723
2724 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2725 }
2726
2727 if (rx_mode != bp->rx_mode) {
2728 bp->rx_mode = rx_mode;
2729 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2730 }
2731
2732 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2733 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2734 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2735
Michael Chanc770a652005-08-25 15:38:39 -07002736 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002737}
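
/* Illustrative sketch (not part of the driver): how the multicast loop in
 * bnx2_set_rx_mode() above maps an address CRC onto one of the eight 32-bit
 * hash registers.  The CRC value below is made up; in the driver it comes
 * from ether_crc_le() over the 6-byte multicast address.
 */
#include <stdio.h>
#include <stdint.h>

static void mc_hash_slot(uint32_t crc, unsigned int *regidx, unsigned int *bitpos)
{
	unsigned int bit = crc & 0xff;		/* low 8 bits of the CRC */

	*regidx = (bit & 0xe0) >> 5;		/* top 3 of those bits: which register */
	*bitpos = bit & 0x1f;			/* low 5 bits: which bit in it */
}

int main(void)
{
	unsigned int regidx, bitpos;

	mc_hash_slot(0x1234abcd, &regidx, &bitpos);	/* hypothetical CRC */
	printf("MULTICAST_HASH%u |= 1 << %u\n", regidx, bitpos);	/* HASH6, bit 13 */
	return 0;
}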
2738
Michael Chanfba9fe92006-06-12 22:21:25 -07002739#define FW_BUF_SIZE 0x8000
2740
2741static int
2742bnx2_gunzip_init(struct bnx2 *bp)
2743{
2744 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2745 goto gunzip_nomem1;
2746
2747 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2748 goto gunzip_nomem2;
2749
2750 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2751 if (bp->strm->workspace == NULL)
2752 goto gunzip_nomem3;
2753
2754 return 0;
2755
2756gunzip_nomem3:
2757 kfree(bp->strm);
2758 bp->strm = NULL;
2759
2760gunzip_nomem2:
2761 vfree(bp->gunzip_buf);
2762 bp->gunzip_buf = NULL;
2763
2764gunzip_nomem1:
2765 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2766 "uncompression.\n", bp->dev->name);
2767 return -ENOMEM;
2768}
2769
2770static void
2771bnx2_gunzip_end(struct bnx2 *bp)
2772{
2773 kfree(bp->strm->workspace);
2774
2775 kfree(bp->strm);
2776 bp->strm = NULL;
2777
2778 if (bp->gunzip_buf) {
2779 vfree(bp->gunzip_buf);
2780 bp->gunzip_buf = NULL;
2781 }
2782}
2783
2784static int
2785bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2786{
2787 int n, rc;
2788
2789 /* check gzip header */
2790 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2791 return -EINVAL;
2792
2793 n = 10;
2794
2795#define FNAME 0x8
2796 if (zbuf[3] & FNAME)
2797 while ((zbuf[n++] != 0) && (n < len));
2798
2799 bp->strm->next_in = zbuf + n;
2800 bp->strm->avail_in = len - n;
2801 bp->strm->next_out = bp->gunzip_buf;
2802 bp->strm->avail_out = FW_BUF_SIZE;
2803
2804 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2805 if (rc != Z_OK)
2806 return rc;
2807
2808 rc = zlib_inflate(bp->strm, Z_FINISH);
2809
2810 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2811 *outbuf = bp->gunzip_buf;
2812
2813 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2814 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2815 bp->dev->name, bp->strm->msg);
2816
2817 zlib_inflateEnd(bp->strm);
2818
2819 if (rc == Z_STREAM_END)
2820 return 0;
2821
2822 return rc;
2823}
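
/* bnx2_gunzip() above accepts gzip-wrapped firmware images: it checks the
 * two gzip magic bytes and the deflate method byte, skips the fixed 10-byte
 * header plus the optional NUL-terminated original file name when the FNAME
 * flag is set, and inflates the remaining raw deflate stream into the
 * preallocated buffer (zlib_inflateInit2() with -MAX_WBITS selects raw
 * deflate with no zlib/gzip wrapper).
 */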
2824
Michael Chanb6016b72005-05-26 13:03:09 -07002825static void
2826load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2827 u32 rv2p_proc)
2828{
2829 int i;
2830 u32 val;
2831
2832
2833 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002834 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002835 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002836 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002837 rv2p_code++;
2838
2839 if (rv2p_proc == RV2P_PROC1) {
2840 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2841 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2842 }
2843 else {
2844 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2845 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2846 }
2847 }
2848
2849 /* Reset the processor, un-stall is done later. */
2850 if (rv2p_proc == RV2P_PROC1) {
2851 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2852 }
2853 else {
2854 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2855 }
2856}
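
/* load_rv2p_fw() above downloads one of the two RV2P processors: each 8-byte
 * instruction is written as a high/low register pair and committed to
 * instruction memory at index i/8 through the PROC1 or PROC2 address/command
 * register with the RDWR bit set.  The processor is left in reset here and
 * released later when the chip is un-stalled.
 */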
2857
Michael Chanaf3ee512006-11-19 14:09:25 -08002858static int
Michael Chanb6016b72005-05-26 13:03:09 -07002859load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2860{
2861 u32 offset;
2862 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002863 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002864
2865 /* Halt the CPU. */
2866 val = REG_RD_IND(bp, cpu_reg->mode);
2867 val |= cpu_reg->mode_value_halt;
2868 REG_WR_IND(bp, cpu_reg->mode, val);
2869 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2870
2871 /* Load the Text area. */
2872 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002873 if (fw->gz_text) {
2874 u32 text_len;
2875 void *text;
2876
2877 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2878 &text_len);
2879 if (rc)
2880 return rc;
2881
2882 fw->text = text;
2883 }
2884 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002885 int j;
2886
2887 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002888 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002889 }
2890 }
2891
2892 /* Load the Data area. */
2893 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2894 if (fw->data) {
2895 int j;
2896
2897 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2898 REG_WR_IND(bp, offset, fw->data[j]);
2899 }
2900 }
2901
2902 /* Load the SBSS area. */
2903 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2904 if (fw->sbss) {
2905 int j;
2906
2907 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2908 REG_WR_IND(bp, offset, fw->sbss[j]);
2909 }
2910 }
2911
2912 /* Load the BSS area. */
2913 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2914 if (fw->bss) {
2915 int j;
2916
2917 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->bss[j]);
2919 }
2920 }
2921
2922 /* Load the Read-Only area. */
2923 offset = cpu_reg->spad_base +
2924 (fw->rodata_addr - cpu_reg->mips_view_base);
2925 if (fw->rodata) {
2926 int j;
2927
2928 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2929 REG_WR_IND(bp, offset, fw->rodata[j]);
2930 }
2931 }
2932
2933 /* Clear the pre-fetch instruction. */
2934 REG_WR_IND(bp, cpu_reg->inst, 0);
2935 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2936
2937 /* Start the CPU. */
2938 val = REG_RD_IND(bp, cpu_reg->mode);
2939 val &= ~cpu_reg->mode_value_halt;
2940 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2941 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002942
2943 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002944}
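
/* load_cpu_fw() above follows the same sequence for every on-chip CPU: halt
 * it, copy each firmware section (text, data, sbss, bss, rodata) into its
 * scratchpad at spad_base + (section_addr - mips_view_base), clear the
 * prefetched instruction, load the program counter with the start address,
 * and finally clear the halt bit so it starts running.  Only the text
 * section is stored gzip-compressed and is decompressed on the fly.
 */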
2945
Michael Chanfba9fe92006-06-12 22:21:25 -07002946static int
Michael Chanb6016b72005-05-26 13:03:09 -07002947bnx2_init_cpus(struct bnx2 *bp)
2948{
2949 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002950 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002951 int rc = 0;
2952 void *text;
2953 u32 text_len;
2954
2955 if ((rc = bnx2_gunzip_init(bp)) != 0)
2956 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002957
2958 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002959 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2960 &text_len);
2961 if (rc)
2962 goto init_cpu_err;
2963
2964 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2965
2966 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2967 &text_len);
2968 if (rc)
2969 goto init_cpu_err;
2970
2971 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002972
2973 /* Initialize the RX Processor. */
2974 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2975 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2976 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2977 cpu_reg.state = BNX2_RXP_CPU_STATE;
2978 cpu_reg.state_value_clear = 0xffffff;
2979 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2980 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2981 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2982 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2983 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2984 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2985 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002986
Michael Chand43584c2006-11-19 14:14:35 -08002987 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2988 fw = &bnx2_rxp_fw_09;
2989 else
2990 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002991
Michael Chanaf3ee512006-11-19 14:09:25 -08002992 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002993 if (rc)
2994 goto init_cpu_err;
2995
Michael Chanb6016b72005-05-26 13:03:09 -07002996 /* Initialize the TX Processor. */
2997 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2998 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2999 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3000 cpu_reg.state = BNX2_TXP_CPU_STATE;
3001 cpu_reg.state_value_clear = 0xffffff;
3002 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3003 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3004 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3005 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3006 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3007 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3008 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003009
Michael Chand43584c2006-11-19 14:14:35 -08003010 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3011 fw = &bnx2_txp_fw_09;
3012 else
3013 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003014
Michael Chanaf3ee512006-11-19 14:09:25 -08003015 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003016 if (rc)
3017 goto init_cpu_err;
3018
Michael Chanb6016b72005-05-26 13:03:09 -07003019 /* Initialize the TX Patch-up Processor. */
3020 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3021 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3022 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3023 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3024 cpu_reg.state_value_clear = 0xffffff;
3025 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3026 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3027 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3028 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3029 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3030 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3031 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003032
Michael Chand43584c2006-11-19 14:14:35 -08003033 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3034 fw = &bnx2_tpat_fw_09;
3035 else
3036 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003037
Michael Chanaf3ee512006-11-19 14:09:25 -08003038 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003039 if (rc)
3040 goto init_cpu_err;
3041
Michael Chanb6016b72005-05-26 13:03:09 -07003042 /* Initialize the Completion Processor. */
3043 cpu_reg.mode = BNX2_COM_CPU_MODE;
3044 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3045 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3046 cpu_reg.state = BNX2_COM_CPU_STATE;
3047 cpu_reg.state_value_clear = 0xffffff;
3048 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3049 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3050 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3051 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3052 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3053 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3054 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003055
Michael Chand43584c2006-11-19 14:14:35 -08003056 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3057 fw = &bnx2_com_fw_09;
3058 else
3059 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003060
Michael Chanaf3ee512006-11-19 14:09:25 -08003061 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003062 if (rc)
3063 goto init_cpu_err;
3064
Michael Chand43584c2006-11-19 14:14:35 -08003065 /* Initialize the Command Processor. */
3066 cpu_reg.mode = BNX2_CP_CPU_MODE;
3067 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3068 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3069 cpu_reg.state = BNX2_CP_CPU_STATE;
3070 cpu_reg.state_value_clear = 0xffffff;
3071 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3072 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3073 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3074 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3075 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3076 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3077 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003078
Michael Chand43584c2006-11-19 14:14:35 -08003079 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3080 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07003081
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08003082 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08003083 if (rc)
3084 goto init_cpu_err;
3085 }
Michael Chanfba9fe92006-06-12 22:21:25 -07003086init_cpu_err:
3087 bnx2_gunzip_end(bp);
3088 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003089}
3090
3091static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003092bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003093{
3094 u16 pmcsr;
3095
3096 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3097
3098 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003099 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003100 u32 val;
3101
3102 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3103 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3104 PCI_PM_CTRL_PME_STATUS);
3105
3106 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3107 /* delay required during transition out of D3hot */
3108 msleep(20);
3109
3110 val = REG_RD(bp, BNX2_EMAC_MODE);
3111 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3112 val &= ~BNX2_EMAC_MODE_MPKT;
3113 REG_WR(bp, BNX2_EMAC_MODE, val);
3114
3115 val = REG_RD(bp, BNX2_RPM_CONFIG);
3116 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3117 REG_WR(bp, BNX2_RPM_CONFIG, val);
3118 break;
3119 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003120 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003121 int i;
3122 u32 val, wol_msg;
3123
3124 if (bp->wol) {
3125 u32 advertising;
3126 u8 autoneg;
3127
3128 autoneg = bp->autoneg;
3129 advertising = bp->advertising;
3130
3131 bp->autoneg = AUTONEG_SPEED;
3132 bp->advertising = ADVERTISED_10baseT_Half |
3133 ADVERTISED_10baseT_Full |
3134 ADVERTISED_100baseT_Half |
3135 ADVERTISED_100baseT_Full |
3136 ADVERTISED_Autoneg;
3137
3138 bnx2_setup_copper_phy(bp);
3139
3140 bp->autoneg = autoneg;
3141 bp->advertising = advertising;
3142
3143 bnx2_set_mac_addr(bp);
3144
3145 val = REG_RD(bp, BNX2_EMAC_MODE);
3146
3147 /* Enable port mode. */
3148 val &= ~BNX2_EMAC_MODE_PORT;
3149 val |= BNX2_EMAC_MODE_PORT_MII |
3150 BNX2_EMAC_MODE_MPKT_RCVD |
3151 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003152 BNX2_EMAC_MODE_MPKT;
3153
3154 REG_WR(bp, BNX2_EMAC_MODE, val);
3155
3156 /* receive all multicast */
3157 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3158 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3159 0xffffffff);
3160 }
3161 REG_WR(bp, BNX2_EMAC_RX_MODE,
3162 BNX2_EMAC_RX_MODE_SORT_MODE);
3163
3164 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3165 BNX2_RPM_SORT_USER0_MC_EN;
3166 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3167 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3168 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3169 BNX2_RPM_SORT_USER0_ENA);
3170
3171 /* Need to enable EMAC and RPM for WOL. */
3172 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3173 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3174 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3175 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3176
3177 val = REG_RD(bp, BNX2_RPM_CONFIG);
3178 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3179 REG_WR(bp, BNX2_RPM_CONFIG, val);
3180
3181 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3182 }
3183 else {
3184 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3185 }
3186
Michael Chandda1e392006-01-23 16:08:14 -08003187 if (!(bp->flags & NO_WOL_FLAG))
3188 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003189
3190 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3191 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3192 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3193
3194 if (bp->wol)
3195 pmcsr |= 3;
3196 }
3197 else {
3198 pmcsr |= 3;
3199 }
3200 if (bp->wol) {
3201 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3202 }
3203 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3204 pmcsr);
3205
3206 /* No more memory access after this point until
3207 * device is brought back to D0.
3208 */
3209 udelay(50);
3210 break;
3211 }
3212 default:
3213 return -EINVAL;
3214 }
3215 return 0;
3216}
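
/* For D3hot with Wake-on-LAN, bnx2_set_power_state() above temporarily
 * reprograms the copper PHY to advertise only 10/100, enables magic-packet
 * and ACPI pattern reception in the EMAC, opens the RX sort filters for
 * broadcast and all multicast, enables the EMAC and related RX blocks needed
 * for WoL, and tells the firmware whether this is a WoL or no-WoL suspend
 * before lowering the PCI power state (PME is enabled only when WoL is
 * requested).
 */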
3217
3218static int
3219bnx2_acquire_nvram_lock(struct bnx2 *bp)
3220{
3221 u32 val;
3222 int j;
3223
3224 /* Request access to the flash interface. */
3225 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3226 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3227 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3228 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3229 break;
3230
3231 udelay(5);
3232 }
3233
3234 if (j >= NVRAM_TIMEOUT_COUNT)
3235 return -EBUSY;
3236
3237 return 0;
3238}
3239
3240static int
3241bnx2_release_nvram_lock(struct bnx2 *bp)
3242{
3243 int j;
3244 u32 val;
3245
 3246 /* Relinquish the NVRAM interface. */
3247 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3248
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3251 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3252 break;
3253
3254 udelay(5);
3255 }
3256
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3258 return -EBUSY;
3259
3260 return 0;
3261}
3262
3263
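/* Allow host-initiated writes to the flash.  Non-buffered parts also
 * need an explicit WREN command, which is issued and polled for
 * completion here.
 */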
3264static int
3265bnx2_enable_nvram_write(struct bnx2 *bp)
3266{
3267 u32 val;
3268
3269 val = REG_RD(bp, BNX2_MISC_CFG);
3270 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3271
3272 if (!bp->flash_info->buffered) {
3273 int j;
3274
3275 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3276 REG_WR(bp, BNX2_NVM_COMMAND,
3277 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3278
3279 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3280 udelay(5);
3281
3282 val = REG_RD(bp, BNX2_NVM_COMMAND);
3283 if (val & BNX2_NVM_COMMAND_DONE)
3284 break;
3285 }
3286
3287 if (j >= NVRAM_TIMEOUT_COUNT)
3288 return -EBUSY;
3289 }
3290 return 0;
3291}
3292
3293static void
3294bnx2_disable_nvram_write(struct bnx2 *bp)
3295{
3296 u32 val;
3297
3298 val = REG_RD(bp, BNX2_MISC_CFG);
3299 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3300}
3301
3302
3303static void
3304bnx2_enable_nvram_access(struct bnx2 *bp)
3305{
3306 u32 val;
3307
3308 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3309 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003310 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003311 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3312}
3313
3314static void
3315bnx2_disable_nvram_access(struct bnx2 *bp)
3316{
3317 u32 val;
3318
3319 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3320 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003321 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003322 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3323 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3324}
3325
3326static int
3327bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3328{
3329 u32 cmd;
3330 int j;
3331
3332 if (bp->flash_info->buffered)
3333 /* Buffered flash, no erase needed */
3334 return 0;
3335
3336 /* Build an erase command */
3337 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3338 BNX2_NVM_COMMAND_DOIT;
3339
3340 /* Need to clear DONE bit separately. */
3341 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3342
 3343 /* Address of the NVRAM sector to erase. */
3344 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3345
3346 /* Issue an erase command. */
3347 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3348
3349 /* Wait for completion. */
3350 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3351 u32 val;
3352
3353 udelay(5);
3354
3355 val = REG_RD(bp, BNX2_NVM_COMMAND);
3356 if (val & BNX2_NVM_COMMAND_DONE)
3357 break;
3358 }
3359
3360 if (j >= NVRAM_TIMEOUT_COUNT)
3361 return -EBUSY;
3362
3363 return 0;
3364}
3365
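/* Read one 32-bit word from NVRAM at the given byte offset.  For
 * buffered flash the offset is first translated into page/offset form,
 * and the result is converted from big-endian before being copied out.
 */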
3366static int
3367bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3368{
3369 u32 cmd;
3370 int j;
3371
3372 /* Build the command word. */
3373 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3374
 3375 /* Translate the offset into buffered flash addressing. */
3376 if (bp->flash_info->buffered) {
3377 offset = ((offset / bp->flash_info->page_size) <<
3378 bp->flash_info->page_bits) +
3379 (offset % bp->flash_info->page_size);
3380 }
3381
3382 /* Need to clear DONE bit separately. */
3383 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3384
3385 /* Address of the NVRAM to read from. */
3386 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3387
3388 /* Issue a read command. */
3389 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3390
3391 /* Wait for completion. */
3392 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3393 u32 val;
3394
3395 udelay(5);
3396
3397 val = REG_RD(bp, BNX2_NVM_COMMAND);
3398 if (val & BNX2_NVM_COMMAND_DONE) {
3399 val = REG_RD(bp, BNX2_NVM_READ);
3400
3401 val = be32_to_cpu(val);
3402 memcpy(ret_val, &val, 4);
3403 break;
3404 }
3405 }
3406 if (j >= NVRAM_TIMEOUT_COUNT)
3407 return -EBUSY;
3408
3409 return 0;
3410}
3411
3412
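/* Write one 32-bit word to NVRAM at the given byte offset, translating
 * the offset for buffered flash and polling the command for completion.
 */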
3413static int
3414bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3415{
3416 u32 cmd, val32;
3417 int j;
3418
3419 /* Build the command word. */
3420 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3421
3422 /* Calculate an offset of a buffered flash. */
3423 if (bp->flash_info->buffered) {
3424 offset = ((offset / bp->flash_info->page_size) <<
3425 bp->flash_info->page_bits) +
3426 (offset % bp->flash_info->page_size);
3427 }
3428
3429 /* Need to clear DONE bit separately. */
3430 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3431
3432 memcpy(&val32, val, 4);
3433 val32 = cpu_to_be32(val32);
3434
3435 /* Write the data. */
3436 REG_WR(bp, BNX2_NVM_WRITE, val32);
3437
3438 /* Address of the NVRAM to write to. */
3439 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3440
3441 /* Issue the write command. */
3442 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3443
3444 /* Wait for completion. */
3445 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3446 udelay(5);
3447
3448 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3449 break;
3450 }
3451 if (j >= NVRAM_TIMEOUT_COUNT)
3452 return -EBUSY;
3453
3454 return 0;
3455}
3456
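/* Identify the attached flash/EEPROM from the NVM_CFG1 strapping (or
 * from the already-reconfigured value), program its access parameters
 * if needed, and record the usable flash size.
 */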
3457static int
3458bnx2_init_nvram(struct bnx2 *bp)
3459{
3460 u32 val;
3461 int j, entry_count, rc;
3462 struct flash_spec *flash;
3463
3464 /* Determine the selected interface. */
3465 val = REG_RD(bp, BNX2_NVM_CFG1);
3466
3467 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3468
3469 rc = 0;
3470 if (val & 0x40000000) {
3471
3472 /* Flash interface has been reconfigured */
3473 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003474 j++, flash++) {
3475 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3476 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003477 bp->flash_info = flash;
3478 break;
3479 }
3480 }
3481 }
3482 else {
Michael Chan37137702005-11-04 08:49:17 -08003483 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003484 /* Not yet reconfigured */
3485
Michael Chan37137702005-11-04 08:49:17 -08003486 if (val & (1 << 23))
3487 mask = FLASH_BACKUP_STRAP_MASK;
3488 else
3489 mask = FLASH_STRAP_MASK;
3490
Michael Chanb6016b72005-05-26 13:03:09 -07003491 for (j = 0, flash = &flash_table[0]; j < entry_count;
3492 j++, flash++) {
3493
Michael Chan37137702005-11-04 08:49:17 -08003494 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003495 bp->flash_info = flash;
3496
3497 /* Request access to the flash interface. */
3498 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3499 return rc;
3500
3501 /* Enable access to flash interface */
3502 bnx2_enable_nvram_access(bp);
3503
3504 /* Reconfigure the flash interface */
3505 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3506 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3507 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3508 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3509
3510 /* Disable access to flash interface */
3511 bnx2_disable_nvram_access(bp);
3512 bnx2_release_nvram_lock(bp);
3513
3514 break;
3515 }
3516 }
3517 } /* if (val & 0x40000000) */
3518
3519 if (j == entry_count) {
3520 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003521 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003522 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003523 }
3524
Michael Chan1122db72006-01-23 16:11:42 -08003525 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3526 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3527 if (val)
3528 bp->flash_size = val;
3529 else
3530 bp->flash_size = bp->flash_info->total_size;
3531
Michael Chanb6016b72005-05-26 13:03:09 -07003532 return rc;
3533}
3534
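/* Read an arbitrary byte range from NVRAM.  Unaligned head and tail
 * bytes are handled through a small scratch buffer; the aligned middle
 * is streamed one dword at a time with FIRST/LAST command flags.
 */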
3535static int
3536bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3537 int buf_size)
3538{
3539 int rc = 0;
3540 u32 cmd_flags, offset32, len32, extra;
3541
3542 if (buf_size == 0)
3543 return 0;
3544
3545 /* Request access to the flash interface. */
3546 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3547 return rc;
3548
3549 /* Enable access to flash interface */
3550 bnx2_enable_nvram_access(bp);
3551
3552 len32 = buf_size;
3553 offset32 = offset;
3554 extra = 0;
3555
3556 cmd_flags = 0;
3557
3558 if (offset32 & 3) {
3559 u8 buf[4];
3560 u32 pre_len;
3561
3562 offset32 &= ~3;
3563 pre_len = 4 - (offset & 3);
3564
3565 if (pre_len >= len32) {
3566 pre_len = len32;
3567 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3568 BNX2_NVM_COMMAND_LAST;
3569 }
3570 else {
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3572 }
3573
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576 if (rc)
3577 return rc;
3578
3579 memcpy(ret_buf, buf + (offset & 3), pre_len);
3580
3581 offset32 += 4;
3582 ret_buf += pre_len;
3583 len32 -= pre_len;
3584 }
3585 if (len32 & 3) {
3586 extra = 4 - (len32 & 3);
3587 len32 = (len32 + 4) & ~3;
3588 }
3589
3590 if (len32 == 4) {
3591 u8 buf[4];
3592
3593 if (cmd_flags)
3594 cmd_flags = BNX2_NVM_COMMAND_LAST;
3595 else
3596 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3597 BNX2_NVM_COMMAND_LAST;
3598
3599 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3600
3601 memcpy(ret_buf, buf, 4 - extra);
3602 }
3603 else if (len32 > 0) {
3604 u8 buf[4];
3605
3606 /* Read the first word. */
3607 if (cmd_flags)
3608 cmd_flags = 0;
3609 else
3610 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3611
3612 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3613
3614 /* Advance to the next dword. */
3615 offset32 += 4;
3616 ret_buf += 4;
3617 len32 -= 4;
3618
3619 while (len32 > 4 && rc == 0) {
3620 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3621
3622 /* Advance to the next dword. */
3623 offset32 += 4;
3624 ret_buf += 4;
3625 len32 -= 4;
3626 }
3627
3628 if (rc)
3629 return rc;
3630
3631 cmd_flags = BNX2_NVM_COMMAND_LAST;
3632 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3633
3634 memcpy(ret_buf, buf, 4 - extra);
3635 }
3636
3637 /* Disable access to flash interface */
3638 bnx2_disable_nvram_access(bp);
3639
3640 bnx2_release_nvram_lock(bp);
3641
3642 return rc;
3643}
3644
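/* Write an arbitrary byte range to NVRAM.  The range is first padded
 * out to dword alignment with data read back from the flash; for
 * non-buffered parts each affected page is read, erased and rewritten
 * in full.
 */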
3645static int
3646bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3647 int buf_size)
3648{
3649 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003650 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003651 int rc = 0;
3652 int align_start, align_end;
3653
3654 buf = data_buf;
3655 offset32 = offset;
3656 len32 = buf_size;
3657 align_start = align_end = 0;
3658
3659 if ((align_start = (offset32 & 3))) {
3660 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003661 len32 += align_start;
3662 if (len32 < 4)
3663 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003664 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3665 return rc;
3666 }
3667
3668 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003669 align_end = 4 - (len32 & 3);
3670 len32 += align_end;
3671 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3672 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003673 }
3674
3675 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003676 align_buf = kmalloc(len32, GFP_KERNEL);
3677 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003678 return -ENOMEM;
3679 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003680 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003681 }
3682 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003683 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003684 }
Michael Chane6be7632007-01-08 19:56:13 -08003685 memcpy(align_buf + align_start, data_buf, buf_size);
3686 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003687 }
3688
Michael Chanae181bc2006-05-22 16:39:20 -07003689 if (bp->flash_info->buffered == 0) {
3690 flash_buffer = kmalloc(264, GFP_KERNEL);
3691 if (flash_buffer == NULL) {
3692 rc = -ENOMEM;
3693 goto nvram_write_end;
3694 }
3695 }
3696
Michael Chanb6016b72005-05-26 13:03:09 -07003697 written = 0;
3698 while ((written < len32) && (rc == 0)) {
3699 u32 page_start, page_end, data_start, data_end;
3700 u32 addr, cmd_flags;
3701 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003702
3703 /* Find the page_start addr */
3704 page_start = offset32 + written;
3705 page_start -= (page_start % bp->flash_info->page_size);
3706 /* Find the page_end addr */
3707 page_end = page_start + bp->flash_info->page_size;
3708 /* Find the data_start addr */
3709 data_start = (written == 0) ? offset32 : page_start;
3710 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003711 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003712 (offset32 + len32) : page_end;
3713
3714 /* Request access to the flash interface. */
3715 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3716 goto nvram_write_end;
3717
3718 /* Enable access to flash interface */
3719 bnx2_enable_nvram_access(bp);
3720
3721 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3722 if (bp->flash_info->buffered == 0) {
3723 int j;
3724
3725 /* Read the whole page into the buffer
 3726 * (non-buffered flash only) */
3727 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3728 if (j == (bp->flash_info->page_size - 4)) {
3729 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3730 }
3731 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003732 page_start + j,
3733 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003734 cmd_flags);
3735
3736 if (rc)
3737 goto nvram_write_end;
3738
3739 cmd_flags = 0;
3740 }
3741 }
3742
3743 /* Enable writes to flash interface (unlock write-protect) */
3744 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3745 goto nvram_write_end;
3746
Michael Chanb6016b72005-05-26 13:03:09 -07003747 /* Loop to write back the buffer data from page_start to
3748 * data_start */
3749 i = 0;
3750 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003751 /* Erase the page */
3752 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3753 goto nvram_write_end;
3754
 3755 /* Re-enable write access for the actual data write */
3756 bnx2_enable_nvram_write(bp);
3757
Michael Chanb6016b72005-05-26 13:03:09 -07003758 for (addr = page_start; addr < data_start;
3759 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003760
Michael Chanb6016b72005-05-26 13:03:09 -07003761 rc = bnx2_nvram_write_dword(bp, addr,
3762 &flash_buffer[i], cmd_flags);
3763
3764 if (rc != 0)
3765 goto nvram_write_end;
3766
3767 cmd_flags = 0;
3768 }
3769 }
3770
3771 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003772 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003773 if ((addr == page_end - 4) ||
3774 ((bp->flash_info->buffered) &&
3775 (addr == data_end - 4))) {
3776
3777 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3778 }
3779 rc = bnx2_nvram_write_dword(bp, addr, buf,
3780 cmd_flags);
3781
3782 if (rc != 0)
3783 goto nvram_write_end;
3784
3785 cmd_flags = 0;
3786 buf += 4;
3787 }
3788
3789 /* Loop to write back the buffer data from data_end
3790 * to page_end */
3791 if (bp->flash_info->buffered == 0) {
3792 for (addr = data_end; addr < page_end;
3793 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003794
Michael Chanb6016b72005-05-26 13:03:09 -07003795 if (addr == page_end-4) {
3796 cmd_flags = BNX2_NVM_COMMAND_LAST;
3797 }
3798 rc = bnx2_nvram_write_dword(bp, addr,
3799 &flash_buffer[i], cmd_flags);
3800
3801 if (rc != 0)
3802 goto nvram_write_end;
3803
3804 cmd_flags = 0;
3805 }
3806 }
3807
3808 /* Disable writes to flash interface (lock write-protect) */
3809 bnx2_disable_nvram_write(bp);
3810
3811 /* Disable access to flash interface */
3812 bnx2_disable_nvram_access(bp);
3813 bnx2_release_nvram_lock(bp);
3814
3815 /* Increment written */
3816 written += data_end - data_start;
3817 }
3818
3819nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003820 kfree(flash_buffer);
3821 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003822 return rc;
3823}
3824
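/* On SerDes devices, check whether the bootcode advertises remote PHY
 * capability; if so, acknowledge it and record which port type the
 * firmware is currently reporting.
 */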
Michael Chan0d8a6572007-07-07 22:49:43 -07003825static void
3826bnx2_init_remote_phy(struct bnx2 *bp)
3827{
3828 u32 val;
3829
3830 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3831 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3832 return;
3833
3834 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3835 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3836 return;
3837
3838 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3839 if (netif_running(bp->dev)) {
3840 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3841 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3842 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3843 val);
3844 }
3845 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3846
3847 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3848 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3849 bp->phy_port = PORT_FIBRE;
3850 else
3851 bp->phy_port = PORT_TP;
3852 }
3853}
3854
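/* Quiesce DMA, hand the reset code to the firmware, reset the chip
 * core (via MISC_COMMAND on the 5709, PCICFG_MISC_CONFIG otherwise)
 * and wait for the firmware to finish re-initializing.
 */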
Michael Chanb6016b72005-05-26 13:03:09 -07003855static int
3856bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3857{
3858 u32 val;
3859 int i, rc = 0;
3860
3861 /* Wait for the current PCI transaction to complete before
3862 * issuing a reset. */
3863 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3864 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3865 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3866 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3867 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3868 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3869 udelay(5);
3870
Michael Chanb090ae22006-01-23 16:07:10 -08003871 /* Wait for the firmware to tell us it is ok to issue a reset. */
3872 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3873
Michael Chanb6016b72005-05-26 13:03:09 -07003874 /* Deposit a driver reset signature so the firmware knows that
3875 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003876 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003877 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3878
Michael Chanb6016b72005-05-26 13:03:09 -07003879 /* Do a dummy read to force the chip to complete all current transactions
3880 * before we issue a reset. */
3881 val = REG_RD(bp, BNX2_MISC_ID);
3882
Michael Chan234754d2006-11-19 14:11:41 -08003883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3884 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3885 REG_RD(bp, BNX2_MISC_COMMAND);
3886 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003887
Michael Chan234754d2006-11-19 14:11:41 -08003888 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3889 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003890
Michael Chan234754d2006-11-19 14:11:41 -08003891 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003892
Michael Chan234754d2006-11-19 14:11:41 -08003893 } else {
3894 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3895 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3896 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3897
3898 /* Chip reset. */
3899 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3900
3901 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3902 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3903 current->state = TASK_UNINTERRUPTIBLE;
3904 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003905 }
Michael Chanb6016b72005-05-26 13:03:09 -07003906
Michael Chan234754d2006-11-19 14:11:41 -08003907 /* Reset takes approximately 30 usec */
3908 for (i = 0; i < 10; i++) {
3909 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3910 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3911 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3912 break;
3913 udelay(10);
3914 }
3915
3916 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3917 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3918 printk(KERN_ERR PFX "Chip reset did not complete\n");
3919 return -EBUSY;
3920 }
Michael Chanb6016b72005-05-26 13:03:09 -07003921 }
3922
3923 /* Make sure byte swapping is properly configured. */
3924 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3925 if (val != 0x01020304) {
3926 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3927 return -ENODEV;
3928 }
3929
Michael Chanb6016b72005-05-26 13:03:09 -07003930 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003931 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3932 if (rc)
3933 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003934
Michael Chan0d8a6572007-07-07 22:49:43 -07003935 spin_lock_bh(&bp->phy_lock);
3936 bnx2_init_remote_phy(bp);
3937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3938 bnx2_set_default_remote_link(bp);
3939 spin_unlock_bh(&bp->phy_lock);
3940
Michael Chanb6016b72005-05-26 13:03:09 -07003941 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
 3942 /* Adjust the voltage regulator two steps lower. The default
3943 * of this register is 0x0000000e. */
3944 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3945
3946 /* Remove bad rbuf memory from the free pool. */
3947 rc = bnx2_alloc_bad_rbuf(bp);
3948 }
3949
3950 return rc;
3951}
3952
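/* Bring the freshly reset chip to an operational state: program DMA
 * byte swapping, load the on-chip CPUs, set the MAC address and MTU,
 * and configure host coalescing before synchronizing with the firmware.
 */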
3953static int
3954bnx2_init_chip(struct bnx2 *bp)
3955{
3956 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003957 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003958
3959 /* Make sure the interrupt is not active. */
3960 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3961
3962 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3963 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3964#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003965 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003966#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003967 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003968 DMA_READ_CHANS << 12 |
3969 DMA_WRITE_CHANS << 16;
3970
3971 val |= (0x2 << 20) | (1 << 11);
3972
Michael Chandda1e392006-01-23 16:08:14 -08003973 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003974 val |= (1 << 23);
3975
3976 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3977 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3978 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3979
3980 REG_WR(bp, BNX2_DMA_CONFIG, val);
3981
3982 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3983 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3984 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3985 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3986 }
3987
3988 if (bp->flags & PCIX_FLAG) {
3989 u16 val16;
3990
3991 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3992 &val16);
3993 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3994 val16 & ~PCI_X_CMD_ERO);
3995 }
3996
3997 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3998 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3999 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4000 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4001
4002 /* Initialize context mapping and zero out the quick contexts. The
4003 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07004004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4005 rc = bnx2_init_5709_context(bp);
4006 if (rc)
4007 return rc;
4008 } else
Michael Chan59b47d82006-11-19 14:10:45 -08004009 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004010
Michael Chanfba9fe92006-06-12 22:21:25 -07004011 if ((rc = bnx2_init_cpus(bp)) != 0)
4012 return rc;
4013
Michael Chanb6016b72005-05-26 13:03:09 -07004014 bnx2_init_nvram(bp);
4015
4016 bnx2_set_mac_addr(bp);
4017
4018 val = REG_RD(bp, BNX2_MQ_CONFIG);
4019 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4020 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004021 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4022 val |= BNX2_MQ_CONFIG_HALT_DIS;
4023
Michael Chanb6016b72005-05-26 13:03:09 -07004024 REG_WR(bp, BNX2_MQ_CONFIG, val);
4025
4026 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4027 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4028 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4029
4030 val = (BCM_PAGE_BITS - 8) << 24;
4031 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4032
4033 /* Configure page size. */
4034 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4035 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4036 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4037 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4038
4039 val = bp->mac_addr[0] +
4040 (bp->mac_addr[1] << 8) +
4041 (bp->mac_addr[2] << 16) +
4042 bp->mac_addr[3] +
4043 (bp->mac_addr[4] << 8) +
4044 (bp->mac_addr[5] << 16);
4045 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4046
4047 /* Program the MTU. Also include 4 bytes for CRC32. */
4048 val = bp->dev->mtu + ETH_HLEN + 4;
4049 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4050 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4051 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4052
4053 bp->last_status_idx = 0;
4054 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4055
4056 /* Set up how to generate a link change interrupt. */
4057 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4058
4059 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4060 (u64) bp->status_blk_mapping & 0xffffffff);
4061 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4062
4063 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4064 (u64) bp->stats_blk_mapping & 0xffffffff);
4065 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4066 (u64) bp->stats_blk_mapping >> 32);
4067
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004068 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004069 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4070
4071 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4072 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4073
4074 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4075 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4076
4077 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4078
4079 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4080
4081 REG_WR(bp, BNX2_HC_COM_TICKS,
4082 (bp->com_ticks_int << 16) | bp->com_ticks);
4083
4084 REG_WR(bp, BNX2_HC_CMD_TICKS,
4085 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4086
Michael Chan02537b062007-06-04 21:24:07 -07004087 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4088 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4089 else
4090 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
Michael Chanb6016b72005-05-26 13:03:09 -07004091 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4092
4093 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004094 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004095 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004096 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4097 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004098 }
4099
Michael Chan8e6a72c2007-05-03 13:24:48 -07004100 if (bp->flags & ONE_SHOT_MSI_FLAG)
4101 val |= BNX2_HC_CONFIG_ONE_SHOT;
4102
4103 REG_WR(bp, BNX2_HC_CONFIG, val);
4104
Michael Chanb6016b72005-05-26 13:03:09 -07004105 /* Clear internal stats counters. */
4106 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4107
Michael Chanda3e4fb2007-05-03 13:24:23 -07004108 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004109
Michael Chane29054f2006-01-23 16:06:06 -08004110 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4111 BNX2_PORT_FEATURE_ASF_ENABLED)
4112 bp->flags |= ASF_ENABLE_FLAG;
4113
Michael Chanb6016b72005-05-26 13:03:09 -07004114 /* Initialize the receive filter. */
4115 bnx2_set_rx_mode(bp->dev);
4116
Michael Chan0aa38df2007-06-04 21:23:06 -07004117 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4118 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4119 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4120 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4121 }
Michael Chanb090ae22006-01-23 16:07:10 -08004122 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4123 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004124
4125 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
4126 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4127
4128 udelay(20);
4129
Michael Chanbf5295b2006-03-23 01:11:56 -08004130 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4131
Michael Chanb090ae22006-01-23 16:07:10 -08004132 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004133}
4134
Michael Chan59b47d82006-11-19 14:10:45 -08004135static void
4136bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4137{
4138 u32 val, offset0, offset1, offset2, offset3;
4139
4140 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4141 offset0 = BNX2_L2CTX_TYPE_XI;
4142 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4143 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4144 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4145 } else {
4146 offset0 = BNX2_L2CTX_TYPE;
4147 offset1 = BNX2_L2CTX_CMD_TYPE;
4148 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4149 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4150 }
4151 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4152 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4153
4154 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4155 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4156
4157 val = (u64) bp->tx_desc_mapping >> 32;
4158 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4159
4160 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4161 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4162}
Michael Chanb6016b72005-05-26 13:03:09 -07004163
4164static void
4165bnx2_init_tx_ring(struct bnx2 *bp)
4166{
4167 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004168 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07004169
Michael Chan2f8af122006-08-15 01:39:10 -07004170 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4171
Michael Chanb6016b72005-05-26 13:03:09 -07004172 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004173
Michael Chanb6016b72005-05-26 13:03:09 -07004174 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4175 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4176
4177 bp->tx_prod = 0;
4178 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004179 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004180 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004181
Michael Chan59b47d82006-11-19 14:10:45 -08004182 cid = TX_CID;
4183 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4184 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004185
Michael Chan59b47d82006-11-19 14:10:45 -08004186 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004187}
4188
4189static void
4190bnx2_init_rx_ring(struct bnx2 *bp)
4191{
4192 struct rx_bd *rxbd;
4193 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004194 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07004195 u32 val;
4196
4197 /* 8 for CRC and VLAN */
4198 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08004199 /* hw alignment */
4200 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07004201
4202 ring_prod = prod = bp->rx_prod = 0;
4203 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004204 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004205 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004206
Michael Chan13daffa2006-03-20 17:49:20 -08004207 for (i = 0; i < bp->rx_max_ring; i++) {
4208 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004209
Michael Chan13daffa2006-03-20 17:49:20 -08004210 rxbd = &bp->rx_desc_ring[i][0];
4211 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4212 rxbd->rx_bd_len = bp->rx_buf_use_size;
4213 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4214 }
4215 if (i == (bp->rx_max_ring - 1))
4216 j = 0;
4217 else
4218 j = i + 1;
4219 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4220 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4221 0xffffffff;
4222 }
Michael Chanb6016b72005-05-26 13:03:09 -07004223
4224 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4225 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4226 val |= 0x02 << 8;
4227 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4228
Michael Chan13daffa2006-03-20 17:49:20 -08004229 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07004230 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4231
Michael Chan13daffa2006-03-20 17:49:20 -08004232 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07004233 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4234
Michael Chan236b6392006-03-20 17:49:02 -08004235 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004236 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4237 break;
4238 }
4239 prod = NEXT_RX_BD(prod);
4240 ring_prod = RX_RING_IDX(prod);
4241 }
4242 bp->rx_prod = prod;
4243
4244 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4245
4246 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4247}
4248
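/* Work out how many RX descriptor pages the requested ring size needs,
 * round that count up to the next power of two, and record the
 * resulting ring limits.
 */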
4249static void
Michael Chan13daffa2006-03-20 17:49:20 -08004250bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4251{
4252 u32 num_rings, max;
4253
4254 bp->rx_ring_size = size;
4255 num_rings = 1;
4256 while (size > MAX_RX_DESC_CNT) {
4257 size -= MAX_RX_DESC_CNT;
4258 num_rings++;
4259 }
4260 /* round to next power of 2 */
4261 max = MAX_RX_RINGS;
4262 while ((max & num_rings) == 0)
4263 max >>= 1;
4264
4265 if (num_rings != max)
4266 max <<= 1;
4267
4268 bp->rx_max_ring = max;
4269 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4270}
4271
4272static void
Michael Chanb6016b72005-05-26 13:03:09 -07004273bnx2_free_tx_skbs(struct bnx2 *bp)
4274{
4275 int i;
4276
4277 if (bp->tx_buf_ring == NULL)
4278 return;
4279
4280 for (i = 0; i < TX_DESC_CNT; ) {
4281 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4282 struct sk_buff *skb = tx_buf->skb;
4283 int j, last;
4284
4285 if (skb == NULL) {
4286 i++;
4287 continue;
4288 }
4289
4290 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4291 skb_headlen(skb), PCI_DMA_TODEVICE);
4292
4293 tx_buf->skb = NULL;
4294
4295 last = skb_shinfo(skb)->nr_frags;
4296 for (j = 0; j < last; j++) {
4297 tx_buf = &bp->tx_buf_ring[i + j + 1];
4298 pci_unmap_page(bp->pdev,
4299 pci_unmap_addr(tx_buf, mapping),
4300 skb_shinfo(skb)->frags[j].size,
4301 PCI_DMA_TODEVICE);
4302 }
Michael Chan745720e2006-06-29 12:37:41 -07004303 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004304 i += j + 1;
4305 }
4306
4307}
4308
4309static void
4310bnx2_free_rx_skbs(struct bnx2 *bp)
4311{
4312 int i;
4313
4314 if (bp->rx_buf_ring == NULL)
4315 return;
4316
Michael Chan13daffa2006-03-20 17:49:20 -08004317 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004318 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4319 struct sk_buff *skb = rx_buf->skb;
4320
Michael Chan05d0f1c2005-11-04 08:53:48 -08004321 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004322 continue;
4323
4324 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4325 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4326
4327 rx_buf->skb = NULL;
4328
Michael Chan745720e2006-06-29 12:37:41 -07004329 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004330 }
4331}
4332
4333static void
4334bnx2_free_skbs(struct bnx2 *bp)
4335{
4336 bnx2_free_tx_skbs(bp);
4337 bnx2_free_rx_skbs(bp);
4338}
4339
4340static int
4341bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4342{
4343 int rc;
4344
4345 rc = bnx2_reset_chip(bp, reset_code);
4346 bnx2_free_skbs(bp);
4347 if (rc)
4348 return rc;
4349
Michael Chanfba9fe92006-06-12 22:21:25 -07004350 if ((rc = bnx2_init_chip(bp)) != 0)
4351 return rc;
4352
Michael Chanb6016b72005-05-26 13:03:09 -07004353 bnx2_init_tx_ring(bp);
4354 bnx2_init_rx_ring(bp);
4355 return 0;
4356}
4357
4358static int
4359bnx2_init_nic(struct bnx2 *bp)
4360{
4361 int rc;
4362
4363 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4364 return rc;
4365
Michael Chan80be4432006-11-19 14:07:28 -08004366 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004367 bnx2_init_phy(bp);
4368 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004369 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004370 return 0;
4371}
4372
4373static int
4374bnx2_test_registers(struct bnx2 *bp)
4375{
4376 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004377 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004378 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004379 u16 offset;
4380 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004381#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004382 u32 rw_mask;
4383 u32 ro_mask;
4384 } reg_tbl[] = {
4385 { 0x006c, 0, 0x00000000, 0x0000003f },
4386 { 0x0090, 0, 0xffffffff, 0x00000000 },
4387 { 0x0094, 0, 0x00000000, 0x00000000 },
4388
Michael Chan5bae30c2007-05-03 13:18:46 -07004389 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4390 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4391 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4392 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4393 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4394 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4395 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4396 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4397 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004398
Michael Chan5bae30c2007-05-03 13:18:46 -07004399 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4400 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4401 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4402 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4403 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4404 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004405
Michael Chan5bae30c2007-05-03 13:18:46 -07004406 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4407 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4408 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004409
4410 { 0x1000, 0, 0x00000000, 0x00000001 },
4411 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004412
4413 { 0x1408, 0, 0x01c00800, 0x00000000 },
4414 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4415 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004416 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004417 { 0x14b0, 0, 0x00000002, 0x00000001 },
4418 { 0x14b8, 0, 0x00000000, 0x00000000 },
4419 { 0x14c0, 0, 0x00000000, 0x00000009 },
4420 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4421 { 0x14cc, 0, 0x00000000, 0x00000001 },
4422 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004423
4424 { 0x1800, 0, 0x00000000, 0x00000001 },
4425 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004426
4427 { 0x2800, 0, 0x00000000, 0x00000001 },
4428 { 0x2804, 0, 0x00000000, 0x00003f01 },
4429 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4430 { 0x2810, 0, 0xffff0000, 0x00000000 },
4431 { 0x2814, 0, 0xffff0000, 0x00000000 },
4432 { 0x2818, 0, 0xffff0000, 0x00000000 },
4433 { 0x281c, 0, 0xffff0000, 0x00000000 },
4434 { 0x2834, 0, 0xffffffff, 0x00000000 },
4435 { 0x2840, 0, 0x00000000, 0xffffffff },
4436 { 0x2844, 0, 0x00000000, 0xffffffff },
4437 { 0x2848, 0, 0xffffffff, 0x00000000 },
4438 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4439
4440 { 0x2c00, 0, 0x00000000, 0x00000011 },
4441 { 0x2c04, 0, 0x00000000, 0x00030007 },
4442
Michael Chanb6016b72005-05-26 13:03:09 -07004443 { 0x3c00, 0, 0x00000000, 0x00000001 },
4444 { 0x3c04, 0, 0x00000000, 0x00070000 },
4445 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4446 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4447 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4448 { 0x3c14, 0, 0x00000000, 0xffffffff },
4449 { 0x3c18, 0, 0x00000000, 0xffffffff },
4450 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4451 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004452
4453 { 0x5004, 0, 0x00000000, 0x0000007f },
4454 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004455
Michael Chanb6016b72005-05-26 13:03:09 -07004456 { 0x5c00, 0, 0x00000000, 0x00000001 },
4457 { 0x5c04, 0, 0x00000000, 0x0003000f },
4458 { 0x5c08, 0, 0x00000003, 0x00000000 },
4459 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4460 { 0x5c10, 0, 0x00000000, 0xffffffff },
4461 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4462 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4463 { 0x5c88, 0, 0x00000000, 0x00077373 },
4464 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4465
4466 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4467 { 0x680c, 0, 0xffffffff, 0x00000000 },
4468 { 0x6810, 0, 0xffffffff, 0x00000000 },
4469 { 0x6814, 0, 0xffffffff, 0x00000000 },
4470 { 0x6818, 0, 0xffffffff, 0x00000000 },
4471 { 0x681c, 0, 0xffffffff, 0x00000000 },
4472 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4473 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4474 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4475 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4476 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4477 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4478 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4479 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4480 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4481 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4482 { 0x684c, 0, 0xffffffff, 0x00000000 },
4483 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4484 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4485 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4486 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4487 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4488 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4489
4490 { 0xffff, 0, 0x00000000, 0x00000000 },
4491 };
4492
4493 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004494 is_5709 = 0;
4495 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4496 is_5709 = 1;
4497
Michael Chanb6016b72005-05-26 13:03:09 -07004498 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4499 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004500 u16 flags = reg_tbl[i].flags;
4501
4502 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4503 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004504
4505 offset = (u32) reg_tbl[i].offset;
4506 rw_mask = reg_tbl[i].rw_mask;
4507 ro_mask = reg_tbl[i].ro_mask;
4508
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004509 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004510
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004511 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004512
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004513 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004514 if ((val & rw_mask) != 0) {
4515 goto reg_test_err;
4516 }
4517
4518 if ((val & ro_mask) != (save_val & ro_mask)) {
4519 goto reg_test_err;
4520 }
4521
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004522 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004523
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004524 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004525 if ((val & rw_mask) != rw_mask) {
4526 goto reg_test_err;
4527 }
4528
4529 if ((val & ro_mask) != (save_val & ro_mask)) {
4530 goto reg_test_err;
4531 }
4532
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004533 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004534 continue;
4535
4536reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004537 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004538 ret = -ENODEV;
4539 break;
4540 }
4541 return ret;
4542}
4543
4544static int
4545bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4546{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004547 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004548 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4549 int i;
4550
4551 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4552 u32 offset;
4553
4554 for (offset = 0; offset < size; offset += 4) {
4555
4556 REG_WR_IND(bp, start + offset, test_pattern[i]);
4557
4558 if (REG_RD_IND(bp, start + offset) !=
4559 test_pattern[i]) {
4560 return -ENODEV;
4561 }
4562 }
4563 }
4564 return 0;
4565}
4566
4567static int
4568bnx2_test_memory(struct bnx2 *bp)
4569{
4570 int ret = 0;
4571 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004572 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004573 u32 offset;
4574 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004575 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004576 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004577 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004578 { 0xe0000, 0x4000 },
4579 { 0x120000, 0x4000 },
4580 { 0x1a0000, 0x4000 },
4581 { 0x160000, 0x4000 },
4582 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004583 },
4584 mem_tbl_5709[] = {
4585 { 0x60000, 0x4000 },
4586 { 0xa0000, 0x3000 },
4587 { 0xe0000, 0x4000 },
4588 { 0x120000, 0x4000 },
4589 { 0x1a0000, 0x4000 },
4590 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004591 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004592 struct mem_entry *mem_tbl;
4593
4594 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4595 mem_tbl = mem_tbl_5709;
4596 else
4597 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004598
4599 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4600 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4601 mem_tbl[i].len)) != 0) {
4602 return ret;
4603 }
4604 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004605
Michael Chanb6016b72005-05-26 13:03:09 -07004606 return ret;
4607}
4608
Michael Chanbc5a0692006-01-23 16:13:22 -08004609#define BNX2_MAC_LOOPBACK 0
4610#define BNX2_PHY_LOOPBACK 1
4611
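/* Send one self-addressed test frame through the selected (MAC or PHY)
 * loopback path and verify that it arrives on the RX ring with no
 * errors and an intact payload.
 */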
Michael Chanb6016b72005-05-26 13:03:09 -07004612static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004613bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004614{
4615 unsigned int pkt_size, num_pkts, i;
4616 struct sk_buff *skb, *rx_skb;
4617 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004618 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004619 dma_addr_t map;
4620 struct tx_bd *txbd;
4621 struct sw_bd *rx_buf;
4622 struct l2_fhdr *rx_hdr;
4623 int ret = -ENODEV;
4624
Michael Chanbc5a0692006-01-23 16:13:22 -08004625 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4626 bp->loopback = MAC_LOOPBACK;
4627 bnx2_set_mac_loopback(bp);
4628 }
4629 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004630 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004631 bnx2_set_phy_loopback(bp);
4632 }
4633 else
4634 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004635
4636 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004637 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004638 if (!skb)
4639 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004640 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004641 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004642 memset(packet + 6, 0x0, 8);
4643 for (i = 14; i < pkt_size; i++)
4644 packet[i] = (unsigned char) (i & 0xff);
4645
4646 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4647 PCI_DMA_TODEVICE);
4648
Michael Chanbf5295b2006-03-23 01:11:56 -08004649 REG_WR(bp, BNX2_HC_COMMAND,
4650 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4651
Michael Chanb6016b72005-05-26 13:03:09 -07004652 REG_RD(bp, BNX2_HC_COMMAND);
4653
4654 udelay(5);
4655 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4656
Michael Chanb6016b72005-05-26 13:03:09 -07004657 num_pkts = 0;
4658
Michael Chanbc5a0692006-01-23 16:13:22 -08004659 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004660
4661 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4662 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4663 txbd->tx_bd_mss_nbytes = pkt_size;
4664 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4665
4666 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004667 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4668 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004669
Michael Chan234754d2006-11-19 14:11:41 -08004670 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4671 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004672
4673 udelay(100);
4674
Michael Chanbf5295b2006-03-23 01:11:56 -08004675 REG_WR(bp, BNX2_HC_COMMAND,
4676 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4677
Michael Chanb6016b72005-05-26 13:03:09 -07004678 REG_RD(bp, BNX2_HC_COMMAND);
4679
4680 udelay(5);
4681
4682 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004683 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004684
Michael Chanbc5a0692006-01-23 16:13:22 -08004685 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004686 goto loopback_test_done;
4687 }
4688
4689 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4690 if (rx_idx != rx_start_idx + num_pkts) {
4691 goto loopback_test_done;
4692 }
4693
4694 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4695 rx_skb = rx_buf->skb;
4696
4697 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4698 skb_reserve(rx_skb, bp->rx_offset);
4699
4700 pci_dma_sync_single_for_cpu(bp->pdev,
4701 pci_unmap_addr(rx_buf, mapping),
4702 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4703
Michael Chanade2bfe2006-01-23 16:09:51 -08004704 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004705 (L2_FHDR_ERRORS_BAD_CRC |
4706 L2_FHDR_ERRORS_PHY_DECODE |
4707 L2_FHDR_ERRORS_ALIGNMENT |
4708 L2_FHDR_ERRORS_TOO_SHORT |
4709 L2_FHDR_ERRORS_GIANT_FRAME)) {
4710
4711 goto loopback_test_done;
4712 }
4713
4714 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4715 goto loopback_test_done;
4716 }
4717
4718 for (i = 14; i < pkt_size; i++) {
4719 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4720 goto loopback_test_done;
4721 }
4722 }
4723
4724 ret = 0;
4725
4726loopback_test_done:
4727 bp->loopback = 0;
4728 return ret;
4729}
4730
Michael Chanbc5a0692006-01-23 16:13:22 -08004731#define BNX2_MAC_LOOPBACK_FAILED 1
4732#define BNX2_PHY_LOOPBACK_FAILED 2
4733#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4734 BNX2_PHY_LOOPBACK_FAILED)
4735
4736static int
4737bnx2_test_loopback(struct bnx2 *bp)
4738{
4739 int rc = 0;
4740
4741 if (!netif_running(bp->dev))
4742 return BNX2_LOOPBACK_FAILED;
4743
4744 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4745 spin_lock_bh(&bp->phy_lock);
4746 bnx2_init_phy(bp);
4747 spin_unlock_bh(&bp->phy_lock);
4748 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4749 rc |= BNX2_MAC_LOOPBACK_FAILED;
4750 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4751 rc |= BNX2_PHY_LOOPBACK_FAILED;
4752 return rc;
4753}
4754
Michael Chanb6016b72005-05-26 13:03:09 -07004755#define NVRAM_SIZE 0x200
4756#define CRC32_RESIDUAL 0xdebb20e3
4757
4758static int
4759bnx2_test_nvram(struct bnx2 *bp)
4760{
4761 u32 buf[NVRAM_SIZE / 4];
4762 u8 *data = (u8 *) buf;
4763 int rc = 0;
4764 u32 magic, csum;
4765
4766 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4767 goto test_nvram_done;
4768
4769 magic = be32_to_cpu(buf[0]);
4770 if (magic != 0x669955aa) {
4771 rc = -ENODEV;
4772 goto test_nvram_done;
4773 }
4774
4775 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4776 goto test_nvram_done;
4777
4778 csum = ether_crc_le(0x100, data);
4779 if (csum != CRC32_RESIDUAL) {
4780 rc = -ENODEV;
4781 goto test_nvram_done;
4782 }
4783
4784 csum = ether_crc_le(0x100, data + 0x100);
4785 if (csum != CRC32_RESIDUAL) {
4786 rc = -ENODEV;
4787 }
4788
4789test_nvram_done:
4790 return rc;
4791}
4792
4793static int
4794bnx2_test_link(struct bnx2 *bp)
4795{
4796 u32 bmsr;
4797
Michael Chanc770a652005-08-25 15:38:39 -07004798 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004799 bnx2_enable_bmsr1(bp);
4800 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4801 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4802 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004803 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004804
Michael Chanb6016b72005-05-26 13:03:09 -07004805 if (bmsr & BMSR_LSTATUS) {
4806 return 0;
4807 }
4808 return -ENODEV;
4809}
4810
4811static int
4812bnx2_test_intr(struct bnx2 *bp)
4813{
4814 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004815 u16 status_idx;
4816
4817 if (!netif_running(bp->dev))
4818 return -ENODEV;
4819
4820 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4821
4822 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004823 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004824 REG_RD(bp, BNX2_HC_COMMAND);
4825
4826 for (i = 0; i < 10; i++) {
4827 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4828 status_idx) {
4829
4830 break;
4831 }
4832
4833 msleep_interruptible(10);
4834 }
4835 if (i < 10)
4836 return 0;
4837
4838 return -ENODEV;
4839}
4840
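/* Periodic SerDes link check for the 5706: if autoneg is not bringing
 * the link up and the partner is not sending config words, fall back to
 * forced 1 Gb/s full duplex (parallel detect), and restore autoneg once
 * config words are seen again.
 */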
4841static void
Michael Chan48b01e22006-11-19 14:08:00 -08004842bnx2_5706_serdes_timer(struct bnx2 *bp)
4843{
4844 spin_lock(&bp->phy_lock);
4845 if (bp->serdes_an_pending)
4846 bp->serdes_an_pending--;
4847 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4848 u32 bmcr;
4849
4850 bp->current_interval = bp->timer_interval;
4851
Michael Chanca58c3a2007-05-03 13:22:52 -07004852 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004853
4854 if (bmcr & BMCR_ANENABLE) {
4855 u32 phy1, phy2;
4856
4857 bnx2_write_phy(bp, 0x1c, 0x7c00);
4858 bnx2_read_phy(bp, 0x1c, &phy1);
4859
4860 bnx2_write_phy(bp, 0x17, 0x0f01);
4861 bnx2_read_phy(bp, 0x15, &phy2);
4862 bnx2_write_phy(bp, 0x17, 0x0f01);
4863 bnx2_read_phy(bp, 0x15, &phy2);
4864
4865 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4866 !(phy2 & 0x20)) { /* no CONFIG */
4867
4868 bmcr &= ~BMCR_ANENABLE;
4869 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004870 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004871 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4872 }
4873 }
4874 }
4875 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4876 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4877 u32 phy2;
4878
4879 bnx2_write_phy(bp, 0x17, 0x0f01);
4880 bnx2_read_phy(bp, 0x15, &phy2);
4881 if (phy2 & 0x20) {
4882 u32 bmcr;
4883
Michael Chanca58c3a2007-05-03 13:22:52 -07004884 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004885 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004886 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004887
4888 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4889 }
4890 } else
4891 bp->current_interval = bp->timer_interval;
4892
4893 spin_unlock(&bp->phy_lock);
4894}
4895
4896static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004897bnx2_5708_serdes_timer(struct bnx2 *bp)
4898{
Michael Chan0d8a6572007-07-07 22:49:43 -07004899 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4900 return;
4901
Michael Chanf8dd0642006-11-19 14:08:29 -08004902 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4903 bp->serdes_an_pending = 0;
4904 return;
4905 }
4906
4907 spin_lock(&bp->phy_lock);
4908 if (bp->serdes_an_pending)
4909 bp->serdes_an_pending--;
4910 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4911 u32 bmcr;
4912
Michael Chanca58c3a2007-05-03 13:22:52 -07004913 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004914 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004915 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004916 bp->current_interval = SERDES_FORCED_TIMEOUT;
4917 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004918 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004919 bp->serdes_an_pending = 2;
4920 bp->current_interval = bp->timer_interval;
4921 }
4922
4923 } else
4924 bp->current_interval = bp->timer_interval;
4925
4926 spin_unlock(&bp->phy_lock);
4927}
4928
4929static void
Michael Chanb6016b72005-05-26 13:03:09 -07004930bnx2_timer(unsigned long data)
4931{
4932 struct bnx2 *bp = (struct bnx2 *) data;
4933 u32 msg;
4934
Michael Chancd339a02005-08-25 15:35:24 -07004935 if (!netif_running(bp->dev))
4936 return;
4937
Michael Chanb6016b72005-05-26 13:03:09 -07004938 if (atomic_read(&bp->intr_sem) != 0)
4939 goto bnx2_restart_timer;
4940
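	/* Write the incrementing pulse sequence to shared memory as a
	 * heartbeat so the bootcode knows the driver is alive.
	 */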
4941 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004942 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004943
Michael Chancea94db2006-06-12 22:16:13 -07004944 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4945
Michael Chan02537b062007-06-04 21:24:07 -07004946	/* work around occasional corrupted counters */
4947 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4948 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4949 BNX2_HC_COMMAND_STATS_NOW);
4950
Michael Chanf8dd0642006-11-19 14:08:29 -08004951 if (bp->phy_flags & PHY_SERDES_FLAG) {
4952 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4953 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004954 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004955 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004956 }
4957
4958bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004959 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004960}
4961
Michael Chan8e6a72c2007-05-03 13:24:48 -07004962static int
4963bnx2_request_irq(struct bnx2 *bp)
4964{
4965 struct net_device *dev = bp->dev;
4966 int rc = 0;
4967
4968 if (bp->flags & USING_MSI_FLAG) {
4969 irq_handler_t fn = bnx2_msi;
4970
4971 if (bp->flags & ONE_SHOT_MSI_FLAG)
4972 fn = bnx2_msi_1shot;
4973
4974 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4975 } else
4976 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4977 IRQF_SHARED, dev->name, dev);
4978 return rc;
4979}
4980
4981static void
4982bnx2_free_irq(struct bnx2 *bp)
4983{
4984 struct net_device *dev = bp->dev;
4985
4986 if (bp->flags & USING_MSI_FLAG) {
4987 free_irq(bp->pdev->irq, dev);
4988 pci_disable_msi(bp->pdev);
4989 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4990 } else
4991 free_irq(bp->pdev->irq, dev);
4992}
4993
Michael Chanb6016b72005-05-26 13:03:09 -07004994/* Called with rtnl_lock */
4995static int
4996bnx2_open(struct net_device *dev)
4997{
Michael Chan972ec0d2006-01-23 16:12:43 -08004998 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004999 int rc;
5000
Michael Chan1b2f9222007-05-03 13:20:19 -07005001 netif_carrier_off(dev);
5002
Pavel Machek829ca9a2005-09-03 15:56:56 -07005003 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005004 bnx2_disable_int(bp);
5005
5006 rc = bnx2_alloc_mem(bp);
5007 if (rc)
5008 return rc;
5009
Michael Chan8e6a72c2007-05-03 13:24:48 -07005010 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07005011 if (pci_enable_msi(bp->pdev) == 0) {
5012 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07005013 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5014 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005015 }
5016 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07005017 rc = bnx2_request_irq(bp);
5018
Michael Chanb6016b72005-05-26 13:03:09 -07005019 if (rc) {
5020 bnx2_free_mem(bp);
5021 return rc;
5022 }
5023
5024 rc = bnx2_init_nic(bp);
5025
5026 if (rc) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07005027 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005028 bnx2_free_skbs(bp);
5029 bnx2_free_mem(bp);
5030 return rc;
5031 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005032
Michael Chancd339a02005-08-25 15:35:24 -07005033 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005034
5035 atomic_set(&bp->intr_sem, 0);
5036
5037 bnx2_enable_int(bp);
5038
5039 if (bp->flags & USING_MSI_FLAG) {
5040		/* Test MSI to make sure it is working.
5041		 * If the MSI test fails, go back to INTx mode.
5042		 */
5043 if (bnx2_test_intr(bp) != 0) {
5044 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5045 " using MSI, switching to INTx mode. Please"
5046 " report this failure to the PCI maintainer"
5047 " and include system chipset information.\n",
5048 bp->dev->name);
5049
5050 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005051 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005052
5053 rc = bnx2_init_nic(bp);
5054
Michael Chan8e6a72c2007-05-03 13:24:48 -07005055 if (!rc)
5056 rc = bnx2_request_irq(bp);
5057
Michael Chanb6016b72005-05-26 13:03:09 -07005058 if (rc) {
5059 bnx2_free_skbs(bp);
5060 bnx2_free_mem(bp);
5061 del_timer_sync(&bp->timer);
5062 return rc;
5063 }
5064 bnx2_enable_int(bp);
5065 }
5066 }
5067 if (bp->flags & USING_MSI_FLAG) {
5068 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5069 }
5070
5071 netif_start_queue(dev);
5072
5073 return 0;
5074}
5075
5076static void
David Howellsc4028952006-11-22 14:57:56 +00005077bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005078{
David Howellsc4028952006-11-22 14:57:56 +00005079 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005080
Michael Chanafdc08b2005-08-25 15:34:29 -07005081 if (!netif_running(bp->dev))
5082 return;
5083
5084 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005085 bnx2_netif_stop(bp);
5086
5087 bnx2_init_nic(bp);
5088
5089 atomic_set(&bp->intr_sem, 1);
5090 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005091 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005092}
5093
5094static void
5095bnx2_tx_timeout(struct net_device *dev)
5096{
Michael Chan972ec0d2006-01-23 16:12:43 -08005097 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005098
5099	/* This allows the netif to be shut down gracefully before resetting */
5100 schedule_work(&bp->reset_task);
5101}
5102
5103#ifdef BCM_VLAN
5104/* Called with rtnl_lock */
5105static void
5106bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5107{
Michael Chan972ec0d2006-01-23 16:12:43 -08005108 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005109
5110 bnx2_netif_stop(bp);
5111
5112 bp->vlgrp = vlgrp;
5113 bnx2_set_rx_mode(dev);
5114
5115 bnx2_netif_start(bp);
5116}
Michael Chanb6016b72005-05-26 13:03:09 -07005117#endif
5118
Herbert Xu932ff272006-06-09 12:20:56 -07005119/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005120 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5121 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005122 */
5123static int
5124bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5125{
Michael Chan972ec0d2006-01-23 16:12:43 -08005126 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005127 dma_addr_t mapping;
5128 struct tx_bd *txbd;
5129 struct sw_bd *tx_buf;
5130 u32 len, vlan_tag_flags, last_frag, mss;
5131 u16 prod, ring_prod;
5132 int i;
5133
Michael Chane89bbf12005-08-25 15:36:58 -07005134 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005135 netif_stop_queue(dev);
5136 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5137 dev->name);
5138
5139 return NETDEV_TX_BUSY;
5140 }
5141 len = skb_headlen(skb);
5142 prod = bp->tx_prod;
5143 ring_prod = TX_RING_IDX(prod);
5144
5145 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005146 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005147 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5148 }
5149
5150 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5151 vlan_tag_flags |=
5152 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5153 }
Michael Chanfde82052007-05-03 17:23:35 -07005154 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005155 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005156 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005157
Michael Chanb6016b72005-05-26 13:03:09 -07005158 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5159
Michael Chan4666f872007-05-03 13:22:28 -07005160 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005161
Michael Chan4666f872007-05-03 13:22:28 -07005162 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5163 u32 tcp_off = skb_transport_offset(skb) -
5164 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005165
Michael Chan4666f872007-05-03 13:22:28 -07005166 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5167 TX_BD_FLAGS_SW_FLAGS;
5168 if (likely(tcp_off == 0))
5169 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5170 else {
5171 tcp_off >>= 3;
5172 vlan_tag_flags |= ((tcp_off & 0x3) <<
5173 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5174 ((tcp_off & 0x10) <<
5175 TX_BD_FLAGS_TCP6_OFF4_SHL);
5176 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5177 }
5178 } else {
5179 if (skb_header_cloned(skb) &&
5180 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5181 dev_kfree_skb(skb);
5182 return NETDEV_TX_OK;
5183 }
5184
5185 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5186
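			/* Seed the TCP checksum with the pseudo-header
			 * checksum (length omitted) so the hardware can
			 * finish the checksum for each LSO segment.
			 */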
5187 iph = ip_hdr(skb);
5188 iph->check = 0;
5189 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5190 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5191 iph->daddr, 0,
5192 IPPROTO_TCP,
5193 0);
5194 if (tcp_opt_len || (iph->ihl > 5)) {
5195 vlan_tag_flags |= ((iph->ihl - 5) +
5196 (tcp_opt_len >> 2)) << 8;
5197 }
Michael Chanb6016b72005-05-26 13:03:09 -07005198 }
Michael Chan4666f872007-05-03 13:22:28 -07005199 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005200 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005201
5202 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005203
Michael Chanb6016b72005-05-26 13:03:09 -07005204 tx_buf = &bp->tx_buf_ring[ring_prod];
5205 tx_buf->skb = skb;
5206 pci_unmap_addr_set(tx_buf, mapping, mapping);
5207
5208 txbd = &bp->tx_desc_ring[ring_prod];
5209
5210 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5211 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5212 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5213 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5214
5215 last_frag = skb_shinfo(skb)->nr_frags;
5216
5217 for (i = 0; i < last_frag; i++) {
5218 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5219
5220 prod = NEXT_TX_BD(prod);
5221 ring_prod = TX_RING_IDX(prod);
5222 txbd = &bp->tx_desc_ring[ring_prod];
5223
5224 len = frag->size;
5225 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5226 len, PCI_DMA_TODEVICE);
5227 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5228 mapping, mapping);
5229
5230 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5231 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5232 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5233 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5234
5235 }
5236 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5237
5238 prod = NEXT_TX_BD(prod);
5239 bp->tx_prod_bseq += skb->len;
5240
Michael Chan234754d2006-11-19 14:11:41 -08005241 REG_WR16(bp, bp->tx_bidx_addr, prod);
5242 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005243
5244 mmiowb();
5245
5246 bp->tx_prod = prod;
5247 dev->trans_start = jiffies;
5248
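	/* Stop the queue when descriptors run low, then re-check in case
	 * bnx2_tx_int() freed enough descriptors in the meantime.
	 */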
Michael Chane89bbf12005-08-25 15:36:58 -07005249 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005250 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07005251 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005252 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005253 }
5254
5255 return NETDEV_TX_OK;
5256}
5257
5258/* Called with rtnl_lock */
5259static int
5260bnx2_close(struct net_device *dev)
5261{
Michael Chan972ec0d2006-01-23 16:12:43 -08005262 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005263 u32 reset_code;
5264
Michael Chanafdc08b2005-08-25 15:34:29 -07005265 /* Calling flush_scheduled_work() may deadlock because
5266 * linkwatch_event() may be on the workqueue and it will try to get
5267 * the rtnl_lock which we are holding.
5268 */
5269 while (bp->in_reset_task)
5270 msleep(1);
5271
Michael Chanb6016b72005-05-26 13:03:09 -07005272 bnx2_netif_stop(bp);
5273 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005274 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005275 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005276 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005277 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5278 else
5279 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5280 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005281 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005282 bnx2_free_skbs(bp);
5283 bnx2_free_mem(bp);
5284 bp->link_up = 0;
5285 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005286 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005287 return 0;
5288}
5289
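/* The hardware keeps 64-bit counters as _hi/_lo 32-bit halves; combine
 * them on 64-bit hosts and use only the low half on 32-bit hosts.
 */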
5290#define GET_NET_STATS64(ctr) \
5291 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5292 (unsigned long) (ctr##_lo)
5293
5294#define GET_NET_STATS32(ctr) \
5295 (ctr##_lo)
5296
5297#if (BITS_PER_LONG == 64)
5298#define GET_NET_STATS GET_NET_STATS64
5299#else
5300#define GET_NET_STATS GET_NET_STATS32
5301#endif
5302
5303static struct net_device_stats *
5304bnx2_get_stats(struct net_device *dev)
5305{
Michael Chan972ec0d2006-01-23 16:12:43 -08005306 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005307 struct statistics_block *stats_blk = bp->stats_blk;
5308 struct net_device_stats *net_stats = &bp->net_stats;
5309
5310 if (bp->stats_blk == NULL) {
5311 return net_stats;
5312 }
5313 net_stats->rx_packets =
5314 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5315 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5316 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5317
5318 net_stats->tx_packets =
5319 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5320 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5321 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5322
5323 net_stats->rx_bytes =
5324 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5325
5326 net_stats->tx_bytes =
5327 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5328
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005329 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005330 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5331
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005332 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005333 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5334
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005335 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005336 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5337 stats_blk->stat_EtherStatsOverrsizePkts);
5338
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005339 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005340 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5341
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005342 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005343 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5344
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005345 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005346 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5347
5348 net_stats->rx_errors = net_stats->rx_length_errors +
5349 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5350 net_stats->rx_crc_errors;
5351
5352 net_stats->tx_aborted_errors =
5353 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5354 stats_blk->stat_Dot3StatsLateCollisions);
5355
Michael Chan5b0c76a2005-11-04 08:45:49 -08005356 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5357 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005358 net_stats->tx_carrier_errors = 0;
5359 else {
5360 net_stats->tx_carrier_errors =
5361 (unsigned long)
5362 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5363 }
5364
5365 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005366 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005367 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5368 +
5369 net_stats->tx_aborted_errors +
5370 net_stats->tx_carrier_errors;
5371
Michael Chancea94db2006-06-12 22:16:13 -07005372 net_stats->rx_missed_errors =
5373 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5374 stats_blk->stat_FwRxDrop);
5375
Michael Chanb6016b72005-05-26 13:03:09 -07005376 return net_stats;
5377}
5378
5379/* All ethtool functions called with rtnl_lock */
5380
5381static int
5382bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5383{
Michael Chan972ec0d2006-01-23 16:12:43 -08005384 struct bnx2 *bp = netdev_priv(dev);
Michael Chan7b6b8342007-07-07 22:50:15 -07005385 int support_serdes = 0, support_copper = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005386
5387 cmd->supported = SUPPORTED_Autoneg;
Michael Chan7b6b8342007-07-07 22:50:15 -07005388 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5389 support_serdes = 1;
5390 support_copper = 1;
5391 } else if (bp->phy_port == PORT_FIBRE)
5392 support_serdes = 1;
5393 else
5394 support_copper = 1;
5395
5396 if (support_serdes) {
Michael Chanb6016b72005-05-26 13:03:09 -07005397 cmd->supported |= SUPPORTED_1000baseT_Full |
5398 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005399 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5400 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005401
Michael Chanb6016b72005-05-26 13:03:09 -07005402 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005403 if (support_copper) {
Michael Chanb6016b72005-05-26 13:03:09 -07005404 cmd->supported |= SUPPORTED_10baseT_Half |
5405 SUPPORTED_10baseT_Full |
5406 SUPPORTED_100baseT_Half |
5407 SUPPORTED_100baseT_Full |
5408 SUPPORTED_1000baseT_Full |
5409 SUPPORTED_TP;
5410
Michael Chanb6016b72005-05-26 13:03:09 -07005411 }
5412
Michael Chan7b6b8342007-07-07 22:50:15 -07005413 spin_lock_bh(&bp->phy_lock);
5414 cmd->port = bp->phy_port;
Michael Chanb6016b72005-05-26 13:03:09 -07005415 cmd->advertising = bp->advertising;
5416
5417 if (bp->autoneg & AUTONEG_SPEED) {
5418 cmd->autoneg = AUTONEG_ENABLE;
5419 }
5420 else {
5421 cmd->autoneg = AUTONEG_DISABLE;
5422 }
5423
5424 if (netif_carrier_ok(dev)) {
5425 cmd->speed = bp->line_speed;
5426 cmd->duplex = bp->duplex;
5427 }
5428 else {
5429 cmd->speed = -1;
5430 cmd->duplex = -1;
5431 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005432 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005433
5434 cmd->transceiver = XCVR_INTERNAL;
5435 cmd->phy_address = bp->phy_addr;
5436
5437 return 0;
5438}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005439
Michael Chanb6016b72005-05-26 13:03:09 -07005440static int
5441bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5442{
Michael Chan972ec0d2006-01-23 16:12:43 -08005443 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005444 u8 autoneg = bp->autoneg;
5445 u8 req_duplex = bp->req_duplex;
5446 u16 req_line_speed = bp->req_line_speed;
5447 u32 advertising = bp->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005448 int err = -EINVAL;
5449
5450 spin_lock_bh(&bp->phy_lock);
5451
5452 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5453 goto err_out_unlock;
5454
5455 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5456 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005457
5458 if (cmd->autoneg == AUTONEG_ENABLE) {
5459 autoneg |= AUTONEG_SPEED;
5460
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005461 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005462
5463 /* allow advertising 1 speed */
5464 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5465 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5466 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5467 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5468
Michael Chan7b6b8342007-07-07 22:50:15 -07005469 if (cmd->port == PORT_FIBRE)
5470 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005471
5472 advertising = cmd->advertising;
5473
Michael Chan27a005b2007-05-03 13:23:41 -07005474 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
Michael Chan7b6b8342007-07-07 22:50:15 -07005475 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5476 (cmd->port == PORT_TP))
5477 goto err_out_unlock;
5478 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
Michael Chanb6016b72005-05-26 13:03:09 -07005479 advertising = cmd->advertising;
Michael Chan7b6b8342007-07-07 22:50:15 -07005480 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5481 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005482 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005483 if (cmd->port == PORT_FIBRE)
Michael Chanb6016b72005-05-26 13:03:09 -07005484 advertising = ETHTOOL_ALL_FIBRE_SPEED;
Michael Chan7b6b8342007-07-07 22:50:15 -07005485 else
Michael Chanb6016b72005-05-26 13:03:09 -07005486 advertising = ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005487 }
5488 advertising |= ADVERTISED_Autoneg;
5489 }
5490 else {
Michael Chan7b6b8342007-07-07 22:50:15 -07005491 if (cmd->port == PORT_FIBRE) {
Michael Chan80be4432006-11-19 14:07:28 -08005492 if ((cmd->speed != SPEED_1000 &&
5493 cmd->speed != SPEED_2500) ||
5494 (cmd->duplex != DUPLEX_FULL))
Michael Chan7b6b8342007-07-07 22:50:15 -07005495 goto err_out_unlock;
Michael Chan80be4432006-11-19 14:07:28 -08005496
5497 if (cmd->speed == SPEED_2500 &&
5498 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
Michael Chan7b6b8342007-07-07 22:50:15 -07005499 goto err_out_unlock;
Michael Chanb6016b72005-05-26 13:03:09 -07005500 }
Michael Chan7b6b8342007-07-07 22:50:15 -07005501 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5502 goto err_out_unlock;
5503
Michael Chanb6016b72005-05-26 13:03:09 -07005504 autoneg &= ~AUTONEG_SPEED;
5505 req_line_speed = cmd->speed;
5506 req_duplex = cmd->duplex;
5507 advertising = 0;
5508 }
5509
5510 bp->autoneg = autoneg;
5511 bp->advertising = advertising;
5512 bp->req_line_speed = req_line_speed;
5513 bp->req_duplex = req_duplex;
5514
Michael Chan7b6b8342007-07-07 22:50:15 -07005515 err = bnx2_setup_phy(bp, cmd->port);
Michael Chanb6016b72005-05-26 13:03:09 -07005516
Michael Chan7b6b8342007-07-07 22:50:15 -07005517err_out_unlock:
Michael Chanc770a652005-08-25 15:38:39 -07005518 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005519
Michael Chan7b6b8342007-07-07 22:50:15 -07005520 return err;
Michael Chanb6016b72005-05-26 13:03:09 -07005521}
5522
5523static void
5524bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5525{
Michael Chan972ec0d2006-01-23 16:12:43 -08005526 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005527
5528 strcpy(info->driver, DRV_MODULE_NAME);
5529 strcpy(info->version, DRV_MODULE_VERSION);
5530 strcpy(info->bus_info, pci_name(bp->pdev));
5531 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5532 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5533 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005534 info->fw_version[1] = info->fw_version[3] = '.';
5535 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005536}
5537
Michael Chan244ac4f2006-03-20 17:48:46 -08005538#define BNX2_REGDUMP_LEN (32 * 1024)
5539
5540static int
5541bnx2_get_regs_len(struct net_device *dev)
5542{
5543 return BNX2_REGDUMP_LEN;
5544}
5545
5546static void
5547bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5548{
5549 u32 *p = _p, i, offset;
5550 u8 *orig_p = _p;
5551 struct bnx2 *bp = netdev_priv(dev);
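	/* Pairs of [start, end) offsets for the register ranges that are
	 * safe to read; gaps between pairs are left zeroed in the dump.
	 */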
5552 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5553 0x0800, 0x0880, 0x0c00, 0x0c10,
5554 0x0c30, 0x0d08, 0x1000, 0x101c,
5555 0x1040, 0x1048, 0x1080, 0x10a4,
5556 0x1400, 0x1490, 0x1498, 0x14f0,
5557 0x1500, 0x155c, 0x1580, 0x15dc,
5558 0x1600, 0x1658, 0x1680, 0x16d8,
5559 0x1800, 0x1820, 0x1840, 0x1854,
5560 0x1880, 0x1894, 0x1900, 0x1984,
5561 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5562 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5563 0x2000, 0x2030, 0x23c0, 0x2400,
5564 0x2800, 0x2820, 0x2830, 0x2850,
5565 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5566 0x3c00, 0x3c94, 0x4000, 0x4010,
5567 0x4080, 0x4090, 0x43c0, 0x4458,
5568 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5569 0x4fc0, 0x5010, 0x53c0, 0x5444,
5570 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5571 0x5fc0, 0x6000, 0x6400, 0x6428,
5572 0x6800, 0x6848, 0x684c, 0x6860,
5573 0x6888, 0x6910, 0x8000 };
5574
5575 regs->version = 0;
5576
5577 memset(p, 0, BNX2_REGDUMP_LEN);
5578
5579 if (!netif_running(bp->dev))
5580 return;
5581
5582 i = 0;
5583 offset = reg_boundaries[0];
5584 p += offset;
5585 while (offset < BNX2_REGDUMP_LEN) {
5586 *p++ = REG_RD(bp, offset);
5587 offset += 4;
5588 if (offset == reg_boundaries[i + 1]) {
5589 offset = reg_boundaries[i + 2];
5590 p = (u32 *) (orig_p + offset);
5591 i += 2;
5592 }
5593 }
5594}
5595
Michael Chanb6016b72005-05-26 13:03:09 -07005596static void
5597bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5598{
Michael Chan972ec0d2006-01-23 16:12:43 -08005599 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005600
5601 if (bp->flags & NO_WOL_FLAG) {
5602 wol->supported = 0;
5603 wol->wolopts = 0;
5604 }
5605 else {
5606 wol->supported = WAKE_MAGIC;
5607 if (bp->wol)
5608 wol->wolopts = WAKE_MAGIC;
5609 else
5610 wol->wolopts = 0;
5611 }
5612 memset(&wol->sopass, 0, sizeof(wol->sopass));
5613}
5614
5615static int
5616bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5617{
Michael Chan972ec0d2006-01-23 16:12:43 -08005618 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005619
5620 if (wol->wolopts & ~WAKE_MAGIC)
5621 return -EINVAL;
5622
5623 if (wol->wolopts & WAKE_MAGIC) {
5624 if (bp->flags & NO_WOL_FLAG)
5625 return -EINVAL;
5626
5627 bp->wol = 1;
5628 }
5629 else {
5630 bp->wol = 0;
5631 }
5632 return 0;
5633}
5634
5635static int
5636bnx2_nway_reset(struct net_device *dev)
5637{
Michael Chan972ec0d2006-01-23 16:12:43 -08005638 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005639 u32 bmcr;
5640
5641 if (!(bp->autoneg & AUTONEG_SPEED)) {
5642 return -EINVAL;
5643 }
5644
Michael Chanc770a652005-08-25 15:38:39 -07005645 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005646
Michael Chan7b6b8342007-07-07 22:50:15 -07005647 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5648 int rc;
5649
5650 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5651 spin_unlock_bh(&bp->phy_lock);
5652 return rc;
5653 }
5654
Michael Chanb6016b72005-05-26 13:03:09 -07005655 /* Force a link down visible on the other side */
5656 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005657 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005658 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005659
5660 msleep(20);
5661
Michael Chanc770a652005-08-25 15:38:39 -07005662 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005663
5664 bp->current_interval = SERDES_AN_TIMEOUT;
5665 bp->serdes_an_pending = 1;
5666 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005667 }
5668
Michael Chanca58c3a2007-05-03 13:22:52 -07005669 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005670 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005671 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005672
Michael Chanc770a652005-08-25 15:38:39 -07005673 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005674
5675 return 0;
5676}
5677
5678static int
5679bnx2_get_eeprom_len(struct net_device *dev)
5680{
Michael Chan972ec0d2006-01-23 16:12:43 -08005681 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005682
Michael Chan1122db72006-01-23 16:11:42 -08005683 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005684 return 0;
5685
Michael Chan1122db72006-01-23 16:11:42 -08005686 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005687}
5688
5689static int
5690bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5691 u8 *eebuf)
5692{
Michael Chan972ec0d2006-01-23 16:12:43 -08005693 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005694 int rc;
5695
John W. Linville1064e942005-11-10 12:58:24 -08005696 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005697
5698 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5699
5700 return rc;
5701}
5702
5703static int
5704bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5705 u8 *eebuf)
5706{
Michael Chan972ec0d2006-01-23 16:12:43 -08005707 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005708 int rc;
5709
John W. Linville1064e942005-11-10 12:58:24 -08005710 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005711
5712 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5713
5714 return rc;
5715}
5716
5717static int
5718bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5719{
Michael Chan972ec0d2006-01-23 16:12:43 -08005720 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005721
5722 memset(coal, 0, sizeof(struct ethtool_coalesce));
5723
5724 coal->rx_coalesce_usecs = bp->rx_ticks;
5725 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5726 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5727 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5728
5729 coal->tx_coalesce_usecs = bp->tx_ticks;
5730 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5731 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5732 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5733
5734 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5735
5736 return 0;
5737}
5738
5739static int
5740bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5741{
Michael Chan972ec0d2006-01-23 16:12:43 -08005742 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005743
5744 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5745 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5746
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005747 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005748 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5749
5750 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5751 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5752
5753 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5754 if (bp->rx_quick_cons_trip_int > 0xff)
5755 bp->rx_quick_cons_trip_int = 0xff;
5756
5757 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5758 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5759
5760 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5761 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5762
5763 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5764 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5765
5766 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5767 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5768 0xff;
5769
5770 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07005771 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5772 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5773 bp->stats_ticks = USEC_PER_SEC;
5774 }
Michael Chanb6016b72005-05-26 13:03:09 -07005775 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5776 bp->stats_ticks &= 0xffff00;
5777
5778 if (netif_running(bp->dev)) {
5779 bnx2_netif_stop(bp);
5780 bnx2_init_nic(bp);
5781 bnx2_netif_start(bp);
5782 }
5783
5784 return 0;
5785}
5786
5787static void
5788bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5789{
Michael Chan972ec0d2006-01-23 16:12:43 -08005790 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005791
Michael Chan13daffa2006-03-20 17:49:20 -08005792 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005793 ering->rx_mini_max_pending = 0;
5794 ering->rx_jumbo_max_pending = 0;
5795
5796 ering->rx_pending = bp->rx_ring_size;
5797 ering->rx_mini_pending = 0;
5798 ering->rx_jumbo_pending = 0;
5799
5800 ering->tx_max_pending = MAX_TX_DESC_CNT;
5801 ering->tx_pending = bp->tx_ring_size;
5802}
5803
5804static int
5805bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5806{
Michael Chan972ec0d2006-01-23 16:12:43 -08005807 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005808
Michael Chan13daffa2006-03-20 17:49:20 -08005809 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005810 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5811 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5812
5813 return -EINVAL;
5814 }
Michael Chan13daffa2006-03-20 17:49:20 -08005815 if (netif_running(bp->dev)) {
5816 bnx2_netif_stop(bp);
5817 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5818 bnx2_free_skbs(bp);
5819 bnx2_free_mem(bp);
5820 }
5821
5822 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005823 bp->tx_ring_size = ering->tx_pending;
5824
5825 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005826 int rc;
5827
5828 rc = bnx2_alloc_mem(bp);
5829 if (rc)
5830 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005831 bnx2_init_nic(bp);
5832 bnx2_netif_start(bp);
5833 }
5834
5835 return 0;
5836}
5837
5838static void
5839bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5840{
Michael Chan972ec0d2006-01-23 16:12:43 -08005841 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005842
5843 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5844 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5845 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5846}
5847
5848static int
5849bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5850{
Michael Chan972ec0d2006-01-23 16:12:43 -08005851 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005852
5853 bp->req_flow_ctrl = 0;
5854 if (epause->rx_pause)
5855 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5856 if (epause->tx_pause)
5857 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5858
5859 if (epause->autoneg) {
5860 bp->autoneg |= AUTONEG_FLOW_CTRL;
5861 }
5862 else {
5863 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5864 }
5865
Michael Chanc770a652005-08-25 15:38:39 -07005866 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005867
Michael Chan0d8a6572007-07-07 22:49:43 -07005868 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07005869
Michael Chanc770a652005-08-25 15:38:39 -07005870 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005871
5872 return 0;
5873}
5874
5875static u32
5876bnx2_get_rx_csum(struct net_device *dev)
5877{
Michael Chan972ec0d2006-01-23 16:12:43 -08005878 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005879
5880 return bp->rx_csum;
5881}
5882
5883static int
5884bnx2_set_rx_csum(struct net_device *dev, u32 data)
5885{
Michael Chan972ec0d2006-01-23 16:12:43 -08005886 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005887
5888 bp->rx_csum = data;
5889 return 0;
5890}
5891
Michael Chanb11d6212006-06-29 12:31:21 -07005892static int
5893bnx2_set_tso(struct net_device *dev, u32 data)
5894{
Michael Chan4666f872007-05-03 13:22:28 -07005895 struct bnx2 *bp = netdev_priv(dev);
5896
5897 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005898 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005899 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5900 dev->features |= NETIF_F_TSO6;
5901 } else
5902 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5903 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005904 return 0;
5905}
5906
Michael Chancea94db2006-06-12 22:16:13 -07005907#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005908
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005909static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005910 char string[ETH_GSTRING_LEN];
5911} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5912 { "rx_bytes" },
5913 { "rx_error_bytes" },
5914 { "tx_bytes" },
5915 { "tx_error_bytes" },
5916 { "rx_ucast_packets" },
5917 { "rx_mcast_packets" },
5918 { "rx_bcast_packets" },
5919 { "tx_ucast_packets" },
5920 { "tx_mcast_packets" },
5921 { "tx_bcast_packets" },
5922 { "tx_mac_errors" },
5923 { "tx_carrier_errors" },
5924 { "rx_crc_errors" },
5925 { "rx_align_errors" },
5926 { "tx_single_collisions" },
5927 { "tx_multi_collisions" },
5928 { "tx_deferred" },
5929 { "tx_excess_collisions" },
5930 { "tx_late_collisions" },
5931 { "tx_total_collisions" },
5932 { "rx_fragments" },
5933 { "rx_jabbers" },
5934 { "rx_undersize_packets" },
5935 { "rx_oversize_packets" },
5936 { "rx_64_byte_packets" },
5937 { "rx_65_to_127_byte_packets" },
5938 { "rx_128_to_255_byte_packets" },
5939 { "rx_256_to_511_byte_packets" },
5940 { "rx_512_to_1023_byte_packets" },
5941 { "rx_1024_to_1522_byte_packets" },
5942 { "rx_1523_to_9022_byte_packets" },
5943 { "tx_64_byte_packets" },
5944 { "tx_65_to_127_byte_packets" },
5945 { "tx_128_to_255_byte_packets" },
5946 { "tx_256_to_511_byte_packets" },
5947 { "tx_512_to_1023_byte_packets" },
5948 { "tx_1024_to_1522_byte_packets" },
5949 { "tx_1523_to_9022_byte_packets" },
5950 { "rx_xon_frames" },
5951 { "rx_xoff_frames" },
5952 { "tx_xon_frames" },
5953 { "tx_xoff_frames" },
5954 { "rx_mac_ctrl_frames" },
5955 { "rx_filtered_packets" },
5956 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005957 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005958};
5959
5960#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5961
Arjan van de Venf71e1302006-03-03 21:33:57 -05005962static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005963 STATS_OFFSET32(stat_IfHCInOctets_hi),
5964 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5965 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5966 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5967 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5968 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5969 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5970 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5971 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5972 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5973 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005974 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5975 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5976 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5977 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5978 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5979 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5980 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5981 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5982 STATS_OFFSET32(stat_EtherStatsCollisions),
5983 STATS_OFFSET32(stat_EtherStatsFragments),
5984 STATS_OFFSET32(stat_EtherStatsJabbers),
5985 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5986 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5987 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5988 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5989 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5990 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5991 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5992 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5993 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5994 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5995 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5996 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5997 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5998 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5999 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6000 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6001 STATS_OFFSET32(stat_XonPauseFramesReceived),
6002 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6003 STATS_OFFSET32(stat_OutXonSent),
6004 STATS_OFFSET32(stat_OutXoffSent),
6005 STATS_OFFSET32(stat_MacControlFramesReceived),
6006 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6007 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07006008 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07006009};
6010
6011/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6012 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006013 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006014static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07006015 8,0,8,8,8,8,8,8,8,8,
6016 4,0,4,4,4,4,4,4,4,4,
6017 4,4,4,4,4,4,4,4,4,4,
6018 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006019 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07006020};
6021
Michael Chan5b0c76a2005-11-04 08:45:49 -08006022static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6023 8,0,8,8,8,8,8,8,8,8,
6024 4,4,4,4,4,4,4,4,4,4,
6025 4,4,4,4,4,4,4,4,4,4,
6026 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07006027 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08006028};
6029
Michael Chanb6016b72005-05-26 13:03:09 -07006030#define BNX2_NUM_TESTS 6
6031
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006032static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07006033 char string[ETH_GSTRING_LEN];
6034} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6035 { "register_test (offline)" },
6036 { "memory_test (offline)" },
6037 { "loopback_test (offline)" },
6038 { "nvram_test (online)" },
6039 { "interrupt_test (online)" },
6040 { "link_test (online)" },
6041};
6042
6043static int
6044bnx2_self_test_count(struct net_device *dev)
6045{
6046 return BNX2_NUM_TESTS;
6047}
6048
6049static void
6050bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6051{
Michael Chan972ec0d2006-01-23 16:12:43 -08006052 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006053
6054 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6055 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006056 int i;
6057
Michael Chanb6016b72005-05-26 13:03:09 -07006058 bnx2_netif_stop(bp);
6059 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6060 bnx2_free_skbs(bp);
6061
6062 if (bnx2_test_registers(bp) != 0) {
6063 buf[0] = 1;
6064 etest->flags |= ETH_TEST_FL_FAILED;
6065 }
6066 if (bnx2_test_memory(bp) != 0) {
6067 buf[1] = 1;
6068 etest->flags |= ETH_TEST_FL_FAILED;
6069 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006070 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006071 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006072
6073 if (!netif_running(bp->dev)) {
6074 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6075 }
6076 else {
6077 bnx2_init_nic(bp);
6078 bnx2_netif_start(bp);
6079 }
6080
6081 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006082 for (i = 0; i < 7; i++) {
6083 if (bp->link_up)
6084 break;
6085 msleep_interruptible(1000);
6086 }
Michael Chanb6016b72005-05-26 13:03:09 -07006087 }
6088
6089 if (bnx2_test_nvram(bp) != 0) {
6090 buf[3] = 1;
6091 etest->flags |= ETH_TEST_FL_FAILED;
6092 }
6093 if (bnx2_test_intr(bp) != 0) {
6094 buf[4] = 1;
6095 etest->flags |= ETH_TEST_FL_FAILED;
6096 }
6097
6098 if (bnx2_test_link(bp) != 0) {
6099 buf[5] = 1;
6100 etest->flags |= ETH_TEST_FL_FAILED;
6101
6102 }
6103}
6104
6105static void
6106bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107{
6108 switch (stringset) {
6109 case ETH_SS_STATS:
6110 memcpy(buf, bnx2_stats_str_arr,
6111 sizeof(bnx2_stats_str_arr));
6112 break;
6113 case ETH_SS_TEST:
6114 memcpy(buf, bnx2_tests_str_arr,
6115 sizeof(bnx2_tests_str_arr));
6116 break;
6117 }
6118}
6119
6120static int
6121bnx2_get_stats_count(struct net_device *dev)
6122{
6123 return BNX2_NUM_STATS;
6124}
6125
6126static void
6127bnx2_get_ethtool_stats(struct net_device *dev,
6128 struct ethtool_stats *stats, u64 *buf)
6129{
Michael Chan972ec0d2006-01-23 16:12:43 -08006130 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006131 int i;
6132 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006133 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006134
6135 if (hw_stats == NULL) {
6136 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6137 return;
6138 }
6139
Michael Chan5b0c76a2005-11-04 08:45:49 -08006140 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6141 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6142 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6143 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006144 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006145 else
6146 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006147
6148 for (i = 0; i < BNX2_NUM_STATS; i++) {
6149 if (stats_len_arr[i] == 0) {
6150 /* skip this counter */
6151 buf[i] = 0;
6152 continue;
6153 }
6154 if (stats_len_arr[i] == 4) {
6155 /* 4-byte counter */
6156 buf[i] = (u64)
6157 *(hw_stats + bnx2_stats_offset_arr[i]);
6158 continue;
6159 }
6160 /* 8-byte counter */
6161 buf[i] = (((u64) *(hw_stats +
6162 bnx2_stats_offset_arr[i])) << 32) +
6163 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6164 }
6165}
6166
6167static int
6168bnx2_phys_id(struct net_device *dev, u32 data)
6169{
Michael Chan972ec0d2006-01-23 16:12:43 -08006170 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006171 int i;
6172 u32 save;
6173
6174 if (data == 0)
6175 data = 2;
6176
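	/* Take over the LED controls and toggle the LEDs every 500 ms,
	 * blinking for roughly 'data' seconds before restoring the saved
	 * MISC_CFG setting.
	 */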
6177 save = REG_RD(bp, BNX2_MISC_CFG);
6178 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6179
6180 for (i = 0; i < (data * 2); i++) {
6181 if ((i % 2) == 0) {
6182 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6183 }
6184 else {
6185 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6186 BNX2_EMAC_LED_1000MB_OVERRIDE |
6187 BNX2_EMAC_LED_100MB_OVERRIDE |
6188 BNX2_EMAC_LED_10MB_OVERRIDE |
6189 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6190 BNX2_EMAC_LED_TRAFFIC);
6191 }
6192 msleep_interruptible(500);
6193 if (signal_pending(current))
6194 break;
6195 }
6196 REG_WR(bp, BNX2_EMAC_LED, 0);
6197 REG_WR(bp, BNX2_MISC_CFG, save);
6198 return 0;
6199}
6200
Michael Chan4666f872007-05-03 13:22:28 -07006201static int
6202bnx2_set_tx_csum(struct net_device *dev, u32 data)
6203{
6204 struct bnx2 *bp = netdev_priv(dev);
6205
6206 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6207 return (ethtool_op_set_tx_hw_csum(dev, data));
6208 else
6209 return (ethtool_op_set_tx_csum(dev, data));
6210}
6211
Jeff Garzik7282d492006-09-13 14:30:00 -04006212static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006213 .get_settings = bnx2_get_settings,
6214 .set_settings = bnx2_set_settings,
6215 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006216 .get_regs_len = bnx2_get_regs_len,
6217 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006218 .get_wol = bnx2_get_wol,
6219 .set_wol = bnx2_set_wol,
6220 .nway_reset = bnx2_nway_reset,
6221 .get_link = ethtool_op_get_link,
6222 .get_eeprom_len = bnx2_get_eeprom_len,
6223 .get_eeprom = bnx2_get_eeprom,
6224 .set_eeprom = bnx2_set_eeprom,
6225 .get_coalesce = bnx2_get_coalesce,
6226 .set_coalesce = bnx2_set_coalesce,
6227 .get_ringparam = bnx2_get_ringparam,
6228 .set_ringparam = bnx2_set_ringparam,
6229 .get_pauseparam = bnx2_get_pauseparam,
6230 .set_pauseparam = bnx2_set_pauseparam,
6231 .get_rx_csum = bnx2_get_rx_csum,
6232 .set_rx_csum = bnx2_set_rx_csum,
6233 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006234 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006235 .get_sg = ethtool_op_get_sg,
6236 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07006237 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07006238 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006239 .self_test_count = bnx2_self_test_count,
6240 .self_test = bnx2_self_test,
6241 .get_strings = bnx2_get_strings,
6242 .phys_id = bnx2_phys_id,
6243 .get_stats_count = bnx2_get_stats_count,
6244 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07006245 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07006246};
6247
6248/* Called with rtnl_lock */
6249static int
6250bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6251{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006252 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006253 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006254 int err;
6255
6256 switch(cmd) {
6257 case SIOCGMIIPHY:
6258 data->phy_id = bp->phy_addr;
6259
6260 /* fallthru */
6261 case SIOCGMIIREG: {
6262 u32 mii_regval;
6263
Michael Chan7b6b8342007-07-07 22:50:15 -07006264 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6265 return -EOPNOTSUPP;
6266
Michael Chandad3e452007-05-03 13:18:03 -07006267 if (!netif_running(dev))
6268 return -EAGAIN;
6269
Michael Chanc770a652005-08-25 15:38:39 -07006270 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006271 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006272 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006273
6274 data->val_out = mii_regval;
6275
6276 return err;
6277 }
6278
6279 case SIOCSMIIREG:
6280 if (!capable(CAP_NET_ADMIN))
6281 return -EPERM;
6282
Michael Chan7b6b8342007-07-07 22:50:15 -07006283 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6284 return -EOPNOTSUPP;
6285
Michael Chandad3e452007-05-03 13:18:03 -07006286 if (!netif_running(dev))
6287 return -EAGAIN;
6288
Michael Chanc770a652005-08-25 15:38:39 -07006289 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006290 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006291 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006292
6293 return err;
6294
6295 default:
6296 /* do nothing */
6297 break;
6298 }
6299 return -EOPNOTSUPP;
6300}
6301
6302/* Called with rtnl_lock */
6303static int
6304bnx2_change_mac_addr(struct net_device *dev, void *p)
6305{
6306 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006307 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006308
Michael Chan73eef4c2005-08-25 15:39:15 -07006309 if (!is_valid_ether_addr(addr->sa_data))
6310 return -EINVAL;
6311
Michael Chanb6016b72005-05-26 13:03:09 -07006312 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6313 if (netif_running(dev))
6314 bnx2_set_mac_addr(bp);
6315
6316 return 0;
6317}
6318
6319/* Called with rtnl_lock */
6320static int
6321bnx2_change_mtu(struct net_device *dev, int new_mtu)
6322{
Michael Chan972ec0d2006-01-23 16:12:43 -08006323 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006324
6325 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6326 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6327 return -EINVAL;
6328
6329 dev->mtu = new_mtu;
6330 if (netif_running(dev)) {
6331 bnx2_netif_stop(bp);
6332
6333 bnx2_init_nic(bp);
6334
6335 bnx2_netif_start(bp);
6336 }
6337 return 0;
6338}
6339
6340#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6341static void
6342poll_bnx2(struct net_device *dev)
6343{
Michael Chan972ec0d2006-01-23 16:12:43 -08006344 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006345
6346 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006347 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006348 enable_irq(bp->pdev->irq);
6349}
6350#endif
6351
Michael Chan253c8b72007-01-08 19:56:01 -08006352static void __devinit
6353bnx2_get_5709_media(struct bnx2 *bp)
6354{
6355 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6356 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6357 u32 strap;
6358
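	/* The bond ID identifies fixed copper (C) or SerDes (S) parts;
	 * otherwise the strapping value, interpreted per PCI function,
	 * decides which PHY type is wired up.
	 */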
6359 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6360 return;
6361 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6362 bp->phy_flags |= PHY_SERDES_FLAG;
6363 return;
6364 }
6365
6366 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6367 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6368 else
6369 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6370
6371 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6372 switch (strap) {
6373 case 0x4:
6374 case 0x5:
6375 case 0x6:
6376 bp->phy_flags |= PHY_SERDES_FLAG;
6377 return;
6378 }
6379 } else {
6380 switch (strap) {
6381 case 0x1:
6382 case 0x2:
6383 case 0x4:
6384 bp->phy_flags |= PHY_SERDES_FLAG;
6385 return;
6386 }
6387 }
6388}
6389
Michael Chan883e5152007-05-03 13:25:11 -07006390static void __devinit
6391bnx2_get_pci_speed(struct bnx2 *bp)
6392{
6393 u32 reg;
6394
6395 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6396 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6397 u32 clkreg;
6398
6399 bp->flags |= PCIX_FLAG;
6400
6401 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6402
6403 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6404 switch (clkreg) {
6405 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6406 bp->bus_speed_mhz = 133;
6407 break;
6408
6409 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6410 bp->bus_speed_mhz = 100;
6411 break;
6412
6413 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6414 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6415 bp->bus_speed_mhz = 66;
6416 break;
6417
6418 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6419 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6420 bp->bus_speed_mhz = 50;
6421 break;
6422
6423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6424 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6425 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6426 bp->bus_speed_mhz = 33;
6427 break;
6428 }
6429 }
6430 else {
6431 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6432 bp->bus_speed_mhz = 66;
6433 else
6434 bp->bus_speed_mhz = 33;
6435 }
6436
6437 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6438 bp->flags |= PCI_32BIT_FLAG;
6439
6440}
6441
Michael Chanb6016b72005-05-26 13:03:09 -07006442static int __devinit
6443bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6444{
6445 struct bnx2 *bp;
6446 unsigned long mem_len;
6447 int rc;
6448 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006449 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006450
6451 SET_MODULE_OWNER(dev);
6452 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006453 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006454
6455 bp->flags = 0;
6456 bp->phy_flags = 0;
6457
6458 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6459 rc = pci_enable_device(pdev);
6460 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006461 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006462 goto err_out;
6463 }
6464
6465 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006466 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006467 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006468 rc = -ENODEV;
6469 goto err_out_disable;
6470 }
6471
6472 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6473 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006474 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006475 goto err_out_disable;
6476 }
6477
6478 pci_set_master(pdev);
6479
6480 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6481 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006482 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006483 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006484 rc = -EIO;
6485 goto err_out_release;
6486 }
6487
Michael Chanb6016b72005-05-26 13:03:09 -07006488 bp->dev = dev;
6489 bp->pdev = pdev;
6490
6491 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006492 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006493 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006494
6495 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006496 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006497 dev->mem_end = dev->mem_start + mem_len;
6498 dev->irq = pdev->irq;
6499
6500 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6501
6502 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006503 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006504 rc = -ENOMEM;
6505 goto err_out_release;
6506 }
6507
6508 	/* Configure byte swap and enable write to the reg_window registers.
6509 	 * Rely on the CPU to do target byte swapping on big endian systems.
6510 	 * The chip's target access swapping will not swap all accesses.
6511 	 */
6512 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6513 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6514 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6515
Pavel Machek829ca9a2005-09-03 15:56:56 -07006516 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006517
6518 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6519
Michael Chan883e5152007-05-03 13:25:11 -07006520 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6521 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6522 dev_err(&pdev->dev,
6523 "Cannot find PCIE capability, aborting.\n");
6524 rc = -EIO;
6525 goto err_out_unmap;
6526 }
6527 bp->flags |= PCIE_FLAG;
6528 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006529 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6530 if (bp->pcix_cap == 0) {
6531 dev_err(&pdev->dev,
6532 "Cannot find PCIX capability, aborting.\n");
6533 rc = -EIO;
6534 goto err_out_unmap;
6535 }
6536 }
6537
Michael Chan8e6a72c2007-05-03 13:24:48 -07006538 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6539 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6540 bp->flags |= MSI_CAP_FLAG;
6541 }
6542
Michael Chan40453c82007-05-03 13:19:18 -07006543 	/* The 5708 cannot support DMA addresses wider than 40 bits. */
6544 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6545 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6546 else
6547 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6548
6549 /* Configure DMA attributes. */
6550 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6551 dev->features |= NETIF_F_HIGHDMA;
6552 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6553 if (rc) {
6554 dev_err(&pdev->dev,
6555 "pci_set_consistent_dma_mask failed, aborting.\n");
6556 goto err_out_unmap;
6557 }
6558 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6559 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6560 goto err_out_unmap;
6561 }
6562
Michael Chan883e5152007-05-03 13:25:11 -07006563 if (!(bp->flags & PCIE_FLAG))
6564 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006565
6566 /* 5706A0 may falsely detect SERR and PERR. */
6567 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6568 reg = REG_RD(bp, PCI_COMMAND);
6569 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6570 REG_WR(bp, PCI_COMMAND, reg);
6571 }
6572 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6573 !(bp->flags & PCIX_FLAG)) {
6574
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006575 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006576 			"5706 A1 can only be used on a PCI-X bus, aborting.\n");
		rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006577 		goto err_out_unmap;
6578 }
6579
6580 bnx2_init_nvram(bp);
6581
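	/* If the bootcode publishes a shared memory header with a valid
	 * signature, read the per-function shared memory base from it;
	 * otherwise fall back to the fixed default window.
	 */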
Michael Chane3648b32005-11-04 08:51:21 -08006582 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6583
6584 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006585 BNX2_SHM_HDR_SIGNATURE_SIG) {
6586 u32 off = PCI_FUNC(pdev->devfn) << 2;
6587
6588 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6589 } else
Michael Chane3648b32005-11-04 08:51:21 -08006590 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6591
Michael Chanb6016b72005-05-26 13:03:09 -07006592 /* Get the permanent MAC address. First we need to make sure the
6593 * firmware is actually running.
6594 */
Michael Chane3648b32005-11-04 08:51:21 -08006595 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006596
6597 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6598 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006599 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006600 rc = -ENODEV;
6601 goto err_out_unmap;
6602 }
6603
Michael Chane3648b32005-11-04 08:51:21 -08006604 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006605
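	/* The permanent MAC address lives in two shared memory words:
	 * MAC_UPPER holds bytes 0-1 in its low 16 bits and MAC_LOWER
	 * holds bytes 2-5, most significant byte first.
	 */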
Michael Chane3648b32005-11-04 08:51:21 -08006606 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006607 bp->mac_addr[0] = (u8) (reg >> 8);
6608 bp->mac_addr[1] = (u8) reg;
6609
Michael Chane3648b32005-11-04 08:51:21 -08006610 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006611 bp->mac_addr[2] = (u8) (reg >> 24);
6612 bp->mac_addr[3] = (u8) (reg >> 16);
6613 bp->mac_addr[4] = (u8) (reg >> 8);
6614 bp->mac_addr[5] = (u8) reg;
6615
6616 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006617 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006618
6619 bp->rx_csum = 1;
6620
6621 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6622
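	/* Default interrupt coalescing: update after 20 TX / 6 RX
	 * completions or when the corresponding tick timer (80 / 18)
	 * expires, whichever comes first.  The statistics refresh interval
	 * is masked so it fits the hardware register field.
	 */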
6623 bp->tx_quick_cons_trip_int = 20;
6624 bp->tx_quick_cons_trip = 20;
6625 bp->tx_ticks_int = 80;
6626 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006627
Michael Chanb6016b72005-05-26 13:03:09 -07006628 bp->rx_quick_cons_trip_int = 6;
6629 bp->rx_quick_cons_trip = 6;
6630 bp->rx_ticks_int = 18;
6631 bp->rx_ticks = 18;
6632
6633 bp->stats_ticks = 1000000 & 0xffff00;
6634
6635 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006636 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006637
Michael Chan5b0c76a2005-11-04 08:45:49 -08006638 bp->phy_addr = 1;
6639
Michael Chanb6016b72005-05-26 13:03:09 -07006640 	/* Determine the media type.  WOL is disabled below for SERDES chips. */
Michael Chan253c8b72007-01-08 19:56:01 -08006641 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6642 bnx2_get_5709_media(bp);
6643 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006644 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006645
Michael Chan0d8a6572007-07-07 22:49:43 -07006646 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006647 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006648 bp->phy_port = PORT_FIBRE;
Michael Chanb6016b72005-05-26 13:03:09 -07006649 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006650 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006651 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006652 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006653 BNX2_SHARED_HW_CFG_CONFIG);
6654 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6655 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6656 }
Michael Chan0d8a6572007-07-07 22:49:43 -07006657 bnx2_init_remote_phy(bp);
6658
Michael Chan261dd5c2007-01-08 19:55:46 -08006659 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6660 CHIP_NUM(bp) == CHIP_NUM_5708)
6661 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006662 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6663 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006664
Michael Chan16088272006-06-12 22:16:43 -07006665 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6666 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6667 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006668 bp->flags |= NO_WOL_FLAG;
6669
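	/* 5706 A0: use the same coalescing parameters whether or not an
	 * interrupt is being serviced; copy the normal values over the
	 * *_int variants.
	 */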
Michael Chanb6016b72005-05-26 13:03:09 -07006670 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6671 bp->tx_quick_cons_trip_int =
6672 bp->tx_quick_cons_trip;
6673 bp->tx_ticks_int = bp->tx_ticks;
6674 bp->rx_quick_cons_trip_int =
6675 bp->rx_quick_cons_trip;
6676 bp->rx_ticks_int = bp->rx_ticks;
6677 bp->comp_prod_trip_int = bp->comp_prod_trip;
6678 bp->com_ticks_int = bp->com_ticks;
6679 bp->cmd_ticks_int = bp->cmd_ticks;
6680 }
6681
Michael Chanf9317a42006-09-29 17:06:23 -07006682 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6683 *
6684 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6685 * with byte enables disabled on the unused 32-bit word. This is legal
6686 * but causes problems on the AMD 8132 which will eventually stop
6687 * responding after a while.
6688 *
6689 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006690 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006691 */
6692 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6693 struct pci_dev *amd_8132 = NULL;
6694
6695 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6696 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6697 amd_8132))) {
6698 u8 rev;
6699
6700 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6701 if (rev >= 0x10 && rev <= 0x13) {
6702 disable_msi = 1;
6703 pci_dev_put(amd_8132);
6704 break;
6705 }
6706 }
6707 }
6708
Michael Chandeaf3912007-07-07 22:48:00 -07006709 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006710 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6711
Michael Chancd339a02005-08-25 15:35:24 -07006712 init_timer(&bp->timer);
6713 bp->timer.expires = RUN_AT(bp->timer_interval);
6714 bp->timer.data = (unsigned long) bp;
6715 bp->timer.function = bnx2_timer;
6716
Michael Chanb6016b72005-05-26 13:03:09 -07006717 return 0;
6718
6719err_out_unmap:
6720 if (bp->regview) {
6721 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006722 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006723 }
6724
6725err_out_release:
6726 pci_release_regions(pdev);
6727
6728err_out_disable:
6729 pci_disable_device(pdev);
6730 pci_set_drvdata(pdev, NULL);
6731
6732err_out:
6733 return rc;
6734}
6735
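/* Format a human readable bus description into the caller's buffer,
 * e.g. "PCI Express" or "PCI-X 64-bit 133MHz", for the probe banner.
 */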
Michael Chan883e5152007-05-03 13:25:11 -07006736static char * __devinit
6737bnx2_bus_string(struct bnx2 *bp, char *str)
6738{
6739 char *s = str;
6740
6741 if (bp->flags & PCIE_FLAG) {
6742 s += sprintf(s, "PCI Express");
6743 } else {
6744 s += sprintf(s, "PCI");
6745 if (bp->flags & PCIX_FLAG)
6746 s += sprintf(s, "-X");
6747 if (bp->flags & PCI_32BIT_FLAG)
6748 s += sprintf(s, " 32-bit");
6749 else
6750 s += sprintf(s, " 64-bit");
6751 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6752 }
6753 return str;
6754}
6755
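/* PCI probe entry point: allocate the netdev, run the board setup
 * above, wire up the net_device operations and offload features, then
 * register the interface and print the probe banner.
 */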
Michael Chanb6016b72005-05-26 13:03:09 -07006756static int __devinit
6757bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6758{
6759 static int version_printed = 0;
6760 struct net_device *dev = NULL;
6761 struct bnx2 *bp;
6762 int rc, i;
Michael Chan883e5152007-05-03 13:25:11 -07006763 char str[40];
Michael Chanb6016b72005-05-26 13:03:09 -07006764
6765 if (version_printed++ == 0)
6766 printk(KERN_INFO "%s", version);
6767
6768 /* dev zeroed in init_etherdev */
6769 dev = alloc_etherdev(sizeof(*bp));
6770
6771 if (!dev)
6772 return -ENOMEM;
6773
6774 rc = bnx2_init_board(pdev, dev);
6775 if (rc < 0) {
6776 free_netdev(dev);
6777 return rc;
6778 }
6779
6780 dev->open = bnx2_open;
6781 dev->hard_start_xmit = bnx2_start_xmit;
6782 dev->stop = bnx2_close;
6783 dev->get_stats = bnx2_get_stats;
6784 dev->set_multicast_list = bnx2_set_rx_mode;
6785 dev->do_ioctl = bnx2_ioctl;
6786 dev->set_mac_address = bnx2_change_mac_addr;
6787 dev->change_mtu = bnx2_change_mtu;
6788 dev->tx_timeout = bnx2_tx_timeout;
6789 dev->watchdog_timeo = TX_TIMEOUT;
6790#ifdef BCM_VLAN
6791 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07006792#endif
6793 dev->poll = bnx2_poll;
6794 dev->ethtool_ops = &bnx2_ethtool_ops;
6795 dev->weight = 64;
6796
Michael Chan972ec0d2006-01-23 16:12:43 -08006797 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006798
6799#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6800 dev->poll_controller = poll_bnx2;
6801#endif
6802
Michael Chan1b2f9222007-05-03 13:20:19 -07006803 pci_set_drvdata(pdev, dev);
6804
6805 memcpy(dev->dev_addr, bp->mac_addr, 6);
6806 memcpy(dev->perm_addr, bp->mac_addr, 6);
6807 bp->name = board_info[ent->driver_data].name;
6808
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006809 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07006810 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006811 dev->features |= NETIF_F_IPV6_CSUM;
6812
Michael Chan1b2f9222007-05-03 13:20:19 -07006813#ifdef BCM_VLAN
6814 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6815#endif
6816 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006817 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6818 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006819
Michael Chanb6016b72005-05-26 13:03:09 -07006820 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006821 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006822 if (bp->regview)
6823 iounmap(bp->regview);
6824 pci_release_regions(pdev);
6825 pci_disable_device(pdev);
6826 pci_set_drvdata(pdev, NULL);
6827 free_netdev(dev);
6828 return rc;
6829 }
6830
Michael Chan883e5152007-05-03 13:25:11 -07006831 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Michael Chanb6016b72005-05-26 13:03:09 -07006832 "IRQ %d, ",
6833 dev->name,
6834 bp->name,
6835 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6836 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07006837 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07006838 dev->base_addr,
6839 bp->pdev->irq);
6840
6841 printk("node addr ");
6842 for (i = 0; i < 6; i++)
6843 printk("%2.2x", dev->dev_addr[i]);
6844 printk("\n");
6845
Michael Chanb6016b72005-05-26 13:03:09 -07006846 return 0;
6847}
6848
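/* Teardown: flush any pending reset work before unregistering so the
 * workqueue cannot touch a freed netdev, then release MMIO and PCI
 * resources.
 */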
6849static void __devexit
6850bnx2_remove_one(struct pci_dev *pdev)
6851{
6852 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006853 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006854
Michael Chanafdc08b2005-08-25 15:34:29 -07006855 flush_scheduled_work();
6856
Michael Chanb6016b72005-05-26 13:03:09 -07006857 unregister_netdev(dev);
6858
6859 if (bp->regview)
6860 iounmap(bp->regview);
6861
6862 free_netdev(dev);
6863 pci_release_regions(pdev);
6864 pci_disable_device(pdev);
6865 pci_set_drvdata(pdev, NULL);
6866}
6867
6868static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006869bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006870{
6871 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006872 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006873 u32 reset_code;
6874
6875 if (!netif_running(dev))
6876 return 0;
6877
Michael Chan1d60290f2006-03-20 17:50:08 -08006878 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006879 bnx2_netif_stop(bp);
6880 netif_device_detach(dev);
6881 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006882 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006883 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006884 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006885 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6886 else
6887 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6888 bnx2_reset_chip(bp, reset_code);
6889 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006890 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006891 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006892 return 0;
6893}
6894
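/* On resume, restore PCI state, return to D0 and bring the NIC back
 * up, but only if the interface was running when we suspended.
 */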
6895static int
6896bnx2_resume(struct pci_dev *pdev)
6897{
6898 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006899 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006900
6901 if (!netif_running(dev))
6902 return 0;
6903
Michael Chan30c517b2007-05-03 13:20:40 -07006904 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006905 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006906 netif_device_attach(dev);
6907 bnx2_init_nic(bp);
6908 bnx2_netif_start(bp);
6909 return 0;
6910}
6911
6912static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006913 .name = DRV_MODULE_NAME,
6914 .id_table = bnx2_pci_tbl,
6915 .probe = bnx2_init_one,
6916 .remove = __devexit_p(bnx2_remove_one),
6917 .suspend = bnx2_suspend,
6918 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006919};
6920
6921static int __init bnx2_init(void)
6922{
Jeff Garzik29917622006-08-19 17:48:59 -04006923 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006924}
6925
6926static void __exit bnx2_cleanup(void)
6927{
6928 pci_unregister_driver(&bnx2_pci_driver);
6929}
6930
6931module_init(bnx2_init);
6932module_exit(bnx2_cleanup);
6933
6934
6935