blob: 2633579b8c43c5bc68e018607e6d84cdd033bb3b [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
56
/* Driver identification used in log prefixes, ethtool, and modinfo. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.45"
#define DRV_MODULE_RELDATE	"September 29, 2006"

/* Absolute jiffies deadline x ticks from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index; each value selects the matching row of board_info[]
 * and is stored as driver_data in bnx2_pci_tbl[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
89
/* Human-readable board names, indexed by board_t, above.  Order must
 * stay in sync with the board_t enum. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI device match table.  HP OEM boards are listed before the generic
 * Broadcom entries so their subsystem IDs match first; the final field
 * (driver_data) is a board_t index into board_info[]. */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* Supported NVRAM/flash parts.  The hex words in each row are raw
 * controller configuration values (strapping match plus NVM config
 * registers — exact field meaning is defined by struct flash_spec in
 * bnx2.h; NOTE(review): confirm field order against that struct).
 * The remaining fields give buffered-ness, page geometry, byte address
 * mask, total size, and a name for log output.  "Expansion" entries
 * are placeholders for strap codes with no known part. */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Return the number of tx descriptors currently available to the
 * driver.  The smp_mb() ensures tx_prod/tx_cons are read fresh
 * (presumably pairing with a barrier in the tx completion path —
 * confirm against bnx2_tx_int). */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	/* The masked ring indices can wrap; correct the difference when
	 * it exceeds the per-page descriptor count. */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
220
/* Indirect register read: latch the target offset into the PCI config
 * window address register, then read the value back through the data
 * window.  NOTE(review): the two accesses are not atomic and there is
 * no locking here — callers presumably serialize; confirm. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write: counterpart of bnx2_reg_rd_ind().  Sets the
 * window address, then writes the value through the data window. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
/* Write one 32-bit word into on-chip context memory at
 * cid_addr + offset via the CTX data address/data register pair. */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
242
/* Read a PHY register over MDIO.
 *
 * @reg: MII register number.
 * @val: out parameter; receives the 16-bit register value (0 on error).
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within the polling budget (~500us).  If hardware autopolling
 * is enabled, it is disabled around the manual transaction and restored
 * afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Temporarily turn off autopolling so it cannot collide with
	 * our manual MDIO command.  The read-back flushes the write. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Build and issue the read command: PHY address in bits 21+,
	 * register in bits 16+, START_BUSY kicks off the transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us for BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore autopolling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
299
/* Write a PHY register over MDIO.
 *
 * @reg: MII register number.
 * @val: 16-bit value to write.
 *
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * within the polling budget.  Mirrors bnx2_read_phy(): autopolling is
 * suspended around the manual MDIO command and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Suspend hardware autopolling during the manual transaction. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write command; START_BUSY starts the transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore autopolling if it was enabled. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
348
/* Mask device interrupts.  The read-back flushes the posted write so
 * the mask takes effect before the caller proceeds. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
356
/* Unmask device interrupts.  First ack events up to last_status_idx
 * with the interrupt still masked, then unmask; finally COAL_NOW asks
 * the host coalescing engine to fire immediately (presumably so any
 * event that arrived while masked is not lost — confirm against the
 * HC_COMMAND register description). */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
369
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first so the ISR/poll path can observe that
 * interrupts are logically off; bnx2_netif_start() decrements it. */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
377
/* Quiesce the interface: interrupts off (synchronously), NAPI polling
 * and the tx queue stopped.  Counterpart of bnx2_netif_start(). */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
388
/* Re-enable the interface after bnx2_netif_stop().  Only the call that
 * drops intr_sem to zero actually restarts tx/poll/interrupts, so
 * nested stop/start pairs balance correctly. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
400
/* Release all rings and DMA blocks allocated by bnx2_alloc_mem().
 * Safe to call on a partially allocated state (used as the error
 * path of bnx2_alloc_mem); every pointer is NULLed after freeing. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		/* Status and statistics blocks share one DMA allocation
		 * (see bnx2_alloc_mem), so one free releases both. */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree/vfree accept NULL, so no guards are needed here. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
431
/* Allocate all tx/rx rings and the shared status/statistics DMA block.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Host-side shadow ring for tx (zeroed). */
	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	/* Device-visible tx descriptor ring. */
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx shadow ring covers all rx pages, so it can be large;
	 * vmalloc avoids needing physically contiguous memory. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	/* One DMA descriptor page per rx ring. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The stats block lives at the cache-aligned offset just past
	 * the status block within the same DMA allocation. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
490
491static void
Michael Chane3648b32005-11-04 08:51:21 -0800492bnx2_report_fw_link(struct bnx2 *bp)
493{
494 u32 fw_link_status = 0;
495
496 if (bp->link_up) {
497 u32 bmsr;
498
499 switch (bp->line_speed) {
500 case SPEED_10:
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
503 else
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
505 break;
506 case SPEED_100:
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
509 else
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
511 break;
512 case SPEED_1000:
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
515 else
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
517 break;
518 case SPEED_2500:
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
521 else
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
523 break;
524 }
525
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
527
528 if (bp->autoneg) {
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
533
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537 else
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539 }
540 }
541 else
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545}
546
547static void
Michael Chanb6016b72005-05-26 13:03:09 -0700548bnx2_report_link(struct bnx2 *bp)
549{
550 if (bp->link_up) {
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553
554 printk("%d Mbps ", bp->line_speed);
555
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
558 else
559 printk("half duplex");
560
561 if (bp->flow_ctrl) {
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
566 }
567 else {
568 printk(", transmit ");
569 }
570 printk("flow control ON");
571 }
572 printk("\n");
573 }
574 else {
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
577 }
Michael Chane3648b32005-11-04 08:51:21 -0800578
579 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700580}
581
582static void
583bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584{
585 u32 local_adv, remote_adv;
586
587 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
593 }
594 return;
595 }
596
597 if (bp->duplex != DUPLEX_FULL) {
598 return;
599 }
600
Michael Chan5b0c76a2005-11-04 08:45:49 -0800601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
603 u32 val;
604
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
610 return;
611 }
612
Michael Chanb6016b72005-05-26 13:03:09 -0700613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
615
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
619
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
631 }
632
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638 }
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
641 }
642 }
643 else {
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646 }
647 }
648 }
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
652
653 bp->flow_ctrl = FLOW_CTRL_TX;
654 }
655 }
656}
657
658static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800659bnx2_5708s_linkup(struct bnx2 *bp)
660{
661 u32 val;
662
663 bp->link_up = 1;
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
668 break;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
671 break;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
674 break;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
677 break;
678 }
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
681 else
682 bp->duplex = DUPLEX_HALF;
683
684 return 0;
685}
686
687static int
688bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700689{
690 u32 bmcr, local_adv, remote_adv, common;
691
692 bp->link_up = 1;
693 bp->line_speed = SPEED_1000;
694
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
698 }
699 else {
700 bp->duplex = DUPLEX_HALF;
701 }
702
703 if (!(bmcr & BMCR_ANENABLE)) {
704 return 0;
705 }
706
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
715 }
716 else {
717 bp->duplex = DUPLEX_HALF;
718 }
719 }
720
721 return 0;
722}
723
724static int
725bnx2_copper_linkup(struct bnx2 *bp)
726{
727 u32 bmcr;
728
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
732
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
740 }
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
744 }
745 else {
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
748
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
753 }
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
757 }
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
761 }
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
765 }
766 else {
767 bp->line_speed = 0;
768 bp->link_up = 0;
769 }
770 }
771 }
772 else {
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
775 }
776 else {
777 bp->line_speed = SPEED_10;
778 }
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
781 }
782 else {
783 bp->duplex = DUPLEX_HALF;
784 }
785 }
786
787 return 0;
788}
789
/* Program the EMAC for the currently resolved link parameters:
 * tx length limits, port mode (MII/GMII/25G), duplex, and rx/tx pause
 * enables.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default tx lengths; widen the slot time value for half-duplex
	 * gigabit (0x26ff vs 0x2620 — hardware-specific constants). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			/* Only the 5708 has a dedicated 10Mb MII mode. */
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
856
/* Re-evaluate the PHY link state and reprogram MAC/flow control to
 * match.  Called on link-change interrupts and from setup paths.
 * Always returns 0.  NOTE(review): callers presumably hold
 * bp->phy_lock around this — confirm at the call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is reported up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down events; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 SerDes, trust the EMAC link status over the PHY's
	 * BMSR link bit. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: on autoneg SerDes, drop any forced-2.5G
		 * state and make sure autonegotiation is re-enabled. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log and notify firmware when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
923
924static int
925bnx2_reset_phy(struct bnx2 *bp)
926{
927 int i;
928 u32 reg;
929
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
931
932#define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
934 udelay(10);
935
936 bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
938 udelay(20);
939 break;
940 }
941 }
942 if (i == PHY_RESET_MAX_WAIT) {
943 return -EBUSY;
944 }
945 return 0;
946}
947
948static u32
949bnx2_phy_get_pause_adv(struct bnx2 *bp)
950{
951 u32 adv = 0;
952
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
958 }
959 else {
960 adv = ADVERTISE_PAUSE_CAP;
961 }
962 }
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
966 }
967 else {
968 adv = ADVERTISE_PAUSE_ASYM;
969 }
970 }
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974 }
975 else {
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
977 }
978 }
979 return adv;
980}
981
/* Configure the SerDes PHY according to bp->autoneg and the requested
 * speed/duplex/flow-control settings.
 *
 * Caller holds bp->phy_lock; the lock is dropped briefly around the
 * 20 msec delay used to force a visible link-down event.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex: program BMCR directly instead of
		 * autonegotiating.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* 2.5G requires the vendor force bit plus the UP1
			 * 2.5G enable; toggling UP1 requires a link bounce.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Dropping back to 1G on a 5708: clear the 2.5G
			 * enable if it was set.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiated path from here on. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1085
/* Advertisement masks: the ETHTOOL_* masks are in ethtool ADVERTISED_*
 * units, the PHY_* masks are in MII ADVERTISE_* register-bit units.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1098
/* Configure the copper PHY according to bp->autoneg and the requested
 * speed/duplex/flow-control settings.
 *
 * Caller holds bp->phy_lock; the lock is dropped briefly around the
 * 50 msec forced-link-down delay.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Snapshot the currently-programmed advertisements
		 * (speed + pause bits only) so we can skip the PHY
		 * writes and autoneg restart when nothing changed.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Rebuild the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1192
1193static int
1194bnx2_setup_phy(struct bnx2 *bp)
1195{
1196 if (bp->loopback == MAC_LOOPBACK)
1197 return 0;
1198
1199 if (bp->phy_flags & PHY_SERDES_FLAG) {
1200 return (bnx2_setup_serdes_phy(bp));
1201 }
1202 else {
1203 return (bnx2_setup_copper_phy(bp));
1204 }
1205}
1206
/* One-time initialization of the 5708 SerDes PHY: select IEEE-style
 * registers, enable fiber mode with auto-detect and PLL early link
 * detect, enable 2.5G when the board is capable, and apply TX-driver
 * amplitude fixes for early chip steppings and backplane boards.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use IEEE-compliant register mapping (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory; nonzero
	 * means a tweak is required on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1260
/* One-time initialization of the 5706 SerDes PHY.  Programs the
 * extended-packet-length setting to match the current MTU via shadow
 * registers 0x18/0x1c (vendor-specific; exact bit meanings are
 * undocumented here).  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1296
/* One-time initialization of the copper PHY: apply a CRC workaround
 * via shadow registers, set/clear the extended-packet-length bit per
 * MTU, and enable ethernet@wirespeed.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is set unconditionally just above, so
	 * this test is currently always true; kept as-is.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor shadow-register sequence for the CRC fix. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1339
1340
/* Reset and initialize the PHY: enable link attention, read the PHY
 * ID, run the media-specific init routine, then apply the current
 * configuration via bnx2_setup_phy().  Returns the media init's
 * return code (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID: PHYSID1 in the high 16 bits, PHYSID2 in the low. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1373
1374static int
1375bnx2_set_mac_loopback(struct bnx2 *bp)
1376{
1377 u32 mac_mode;
1378
1379 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1380 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1381 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1382 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1383 bp->link_up = 1;
1384 return 0;
1385}
1386
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000/full, wait up to ~1 second for
 * the link to come up, then force the EMAC into GMII mode and mark
 * the logical link up.  Returns 0 on success or the PHY write's
 * error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up: 10 tries, 100 msec apart. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1418
/* Post a message to the bootcode mailbox in shared memory and wait
 * for the firmware to acknowledge it.
 *
 * @msg_data: message code; a sequence number is OR'ed in here.
 * @silent:   suppress the timeout error printk when nonzero.
 *
 * Returns 0 on success (or when no ack is expected), -EBUSY on ack
 * timeout, -EIO when the firmware reports a bad status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't expect an ack; done either way. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1461
1462static void
1463bnx2_init_context(struct bnx2 *bp)
1464{
1465 u32 vcid;
1466
1467 vcid = 96;
1468 while (vcid) {
1469 u32 vcid_addr, pcid_addr, offset;
1470
1471 vcid--;
1472
1473 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1474 u32 new_vcid;
1475
1476 vcid_addr = GET_PCID_ADDR(vcid);
1477 if (vcid & 0x8) {
1478 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1479 }
1480 else {
1481 new_vcid = vcid;
1482 }
1483 pcid_addr = GET_PCID_ADDR(new_vcid);
1484 }
1485 else {
1486 vcid_addr = GET_CID_ADDR(vcid);
1487 pcid_addr = vcid_addr;
1488 }
1489
1490 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1491 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1492
1493 /* Zero out the context. */
1494 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1495 CTX_WR(bp, 0x00, offset, 0);
1496 }
1497
1498 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1499 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1500 }
1501}
1502
/* Work around bad RX buffer memory: repeatedly ask the chip to
 * allocate mbufs, remember the good ones (bit 9 clear), and free only
 * those back — leaving the bad mbufs permanently allocated so the
 * hardware never hands them out again.
 *
 * Returns 0 on success, -ENOMEM if the temporary array can't be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1553
1554static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001555bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001556{
1557 u32 val;
1558 u8 *mac_addr = bp->dev->dev_addr;
1559
1560 val = (mac_addr[0] << 8) | mac_addr[1];
1561
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1563
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001565 (mac_addr[4] << 8) | mac_addr[5];
1566
1567 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1568}
1569
/* Allocate an skb for RX ring slot @index, DMA-map it, and fill in
 * the corresponding hardware buffer descriptor.  Also advances
 * bp->rx_prod_bseq.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hardware descriptor takes the DMA address as hi/lo halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1601
1602static void
1603bnx2_phy_int(struct bnx2 *bp)
1604{
1605 u32 new_link_state, old_link_state;
1606
1607 new_link_state = bp->status_blk->status_attn_bits &
1608 STATUS_ATTN_BITS_LINK_STATE;
1609 old_link_state = bp->status_blk->status_attn_bits_ack &
1610 STATUS_ATTN_BITS_LINK_STATE;
1611 if (new_link_state != old_link_state) {
1612 if (new_link_state) {
1613 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1614 STATUS_ATTN_BITS_LINK_STATE);
1615 }
1616 else {
1617 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1618 STATUS_ATTN_BITS_LINK_STATE);
1619 }
1620 bnx2_set_link(bp);
1621 }
1622}
1623
/* Reclaim completed TX descriptors: unmap and free each transmitted
 * skb up to the hardware consumer index, then wake the TX queue if it
 * was stopped and enough descriptors are now free.  Runs from NAPI
 * poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware never reports a consumer index landing exactly
	 * on the ring's last (link) entry; skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer until all BDs of this packet are done. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page that followed the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Pick up any completions that arrived meanwhile. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the TX lock to avoid racing with
		 * bnx2_start_xmit() stopping the queue.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1711
/* Recycle an RX skb: hand the buffer at ring index @cons back to the
 * hardware at producer index @prod, copying its DMA mapping and
 * buffer-descriptor address.  Used when a packet is dropped or its
 * data has been copied out.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (CPU-synced) header region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1741
/* NAPI RX handler: process up to @budget received frames.  Bad frames
 * and allocation failures recycle the buffer via bnx2_reuse_rx_skb();
 * small frames at jumbo MTU are copied into a fresh skb.  Updates the
 * host BD index/sequence mailboxes when done and returns the number
 * of packets handed to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the ring's last (link) entry, which the hardware never
	 * reports as a valid consumer index.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header so we can inspect it. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr status header to the
		 * frame data; len excludes the 4-byte FCS.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer is in place; pass the
			 * original skb up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when the frame was
		 * recognized as TCP/UDP and no checksum error is flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip how far the host has advanced. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1891
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Acknowledge and mask further interrupts until NAPI re-enables
	 * them.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Schedule NAPI polling. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1914
/* INTx ISR: checks whether this (possibly shared) interrupt belongs
 * to us, acknowledges/masks it, and schedules NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1944
Michael Chanf4e418f2005-11-04 08:53:48 -08001945static inline int
1946bnx2_has_work(struct bnx2 *bp)
1947{
1948 struct status_block *sblk = bp->status_blk;
1949
1950 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1951 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1952 return 1;
1953
1954 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1955 bp->link_up)
1956 return 1;
1957
1958 return 0;
1959}
1960
/* NAPI poll routine: service a pending link attention, reap TX and RX
 * completions within the budget, and when no work remains, leave the
 * poll list and re-enable the chip interrupt.
 * Returns 0 when polling is complete, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A link attention is pending when the asserted bits differ
	 * from the acknowledged bits.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	/* Reap TX completions if the hardware advanced the TX index. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	/* Process RX completions, limited by both budgets. */
	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index before the final work check so a
	 * later status block update still raises a new interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack write re-arms the interrupt. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with the interrupt still masked ... */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		/* ... then write again without MASK_INT to unmask it. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* Work remains: keep this device on the poll list. */
	return 1;
}
2022
Herbert Xu932ff272006-06-09 12:20:56 -07002023/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002024 * from set_multicast.
2025 */
/* Program the EMAC RX mode and RPM sort registers to match the
 * device's flags: promiscuous, all-multicast, or a hashed multicast
 * filter built from dev->mc_list.  VLAN tag stripping is kept enabled
 * unless no VLAN group is registered and ASF is off.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (do not strip) VLAN tags only when no VLAN group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast: set all hash register bits. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: CRC low byte selects one of the
		 * 8 x 32-bit hash registers (bits 7:5) and a bit within
		 * it (bits 4:0).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the USER0 sort rules. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2097
Michael Chanfba9fe92006-06-12 22:21:25 -07002098#define FW_BUF_SIZE 0x8000
2099
2100static int
2101bnx2_gunzip_init(struct bnx2 *bp)
2102{
2103 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2104 goto gunzip_nomem1;
2105
2106 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2107 goto gunzip_nomem2;
2108
2109 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2110 if (bp->strm->workspace == NULL)
2111 goto gunzip_nomem3;
2112
2113 return 0;
2114
2115gunzip_nomem3:
2116 kfree(bp->strm);
2117 bp->strm = NULL;
2118
2119gunzip_nomem2:
2120 vfree(bp->gunzip_buf);
2121 bp->gunzip_buf = NULL;
2122
2123gunzip_nomem1:
2124 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2125 "uncompression.\n", bp->dev->name);
2126 return -ENOMEM;
2127}
2128
2129static void
2130bnx2_gunzip_end(struct bnx2 *bp)
2131{
2132 kfree(bp->strm->workspace);
2133
2134 kfree(bp->strm);
2135 bp->strm = NULL;
2136
2137 if (bp->gunzip_buf) {
2138 vfree(bp->gunzip_buf);
2139 bp->gunzip_buf = NULL;
2140 }
2141}
2142
2143static int
2144bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2145{
2146 int n, rc;
2147
2148 /* check gzip header */
2149 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2150 return -EINVAL;
2151
2152 n = 10;
2153
2154#define FNAME 0x8
2155 if (zbuf[3] & FNAME)
2156 while ((zbuf[n++] != 0) && (n < len));
2157
2158 bp->strm->next_in = zbuf + n;
2159 bp->strm->avail_in = len - n;
2160 bp->strm->next_out = bp->gunzip_buf;
2161 bp->strm->avail_out = FW_BUF_SIZE;
2162
2163 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2164 if (rc != Z_OK)
2165 return rc;
2166
2167 rc = zlib_inflate(bp->strm, Z_FINISH);
2168
2169 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2170 *outbuf = bp->gunzip_buf;
2171
2172 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2173 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2174 bp->dev->name, bp->strm->msg);
2175
2176 zlib_inflateEnd(bp->strm);
2177
2178 if (rc == Z_STREAM_END)
2179 return 0;
2180
2181 return rc;
2182}
2183
/* Download microcode into one of the two RV2P processors, one 64-bit
 * instruction (two 32-bit register writes) per iteration, then hold
 * that processor in reset; it is un-stalled later during chip init.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* NOTE(review): cpu_to_le32() before REG_WR is a no-op on
		 * little-endian hosts but swaps on big-endian — confirm
		 * the byte order the RV2P instruction image expects.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction pair at index i/8 into the
		 * selected processor's instruction store.
		 */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2216
Michael Chanaf3ee512006-11-19 14:09:25 -08002217static int
Michael Chanb6016b72005-05-26 13:03:09 -07002218load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2219{
2220 u32 offset;
2221 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002222 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002223
2224 /* Halt the CPU. */
2225 val = REG_RD_IND(bp, cpu_reg->mode);
2226 val |= cpu_reg->mode_value_halt;
2227 REG_WR_IND(bp, cpu_reg->mode, val);
2228 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2229
2230 /* Load the Text area. */
2231 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002232 if (fw->gz_text) {
2233 u32 text_len;
2234 void *text;
2235
2236 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2237 &text_len);
2238 if (rc)
2239 return rc;
2240
2241 fw->text = text;
2242 }
2243 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002244 int j;
2245
2246 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002247 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002248 }
2249 }
2250
2251 /* Load the Data area. */
2252 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2253 if (fw->data) {
2254 int j;
2255
2256 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2257 REG_WR_IND(bp, offset, fw->data[j]);
2258 }
2259 }
2260
2261 /* Load the SBSS area. */
2262 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2263 if (fw->sbss) {
2264 int j;
2265
2266 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2267 REG_WR_IND(bp, offset, fw->sbss[j]);
2268 }
2269 }
2270
2271 /* Load the BSS area. */
2272 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2273 if (fw->bss) {
2274 int j;
2275
2276 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2277 REG_WR_IND(bp, offset, fw->bss[j]);
2278 }
2279 }
2280
2281 /* Load the Read-Only area. */
2282 offset = cpu_reg->spad_base +
2283 (fw->rodata_addr - cpu_reg->mips_view_base);
2284 if (fw->rodata) {
2285 int j;
2286
2287 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2288 REG_WR_IND(bp, offset, fw->rodata[j]);
2289 }
2290 }
2291
2292 /* Clear the pre-fetch instruction. */
2293 REG_WR_IND(bp, cpu_reg->inst, 0);
2294 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2295
2296 /* Start the CPU. */
2297 val = REG_RD_IND(bp, cpu_reg->mode);
2298 val &= ~cpu_reg->mode_value_halt;
2299 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2300 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002301
2302 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002303}
2304
/* Load firmware into all on-chip processors: the two RV2P sequencers
 * and the RX, TX, TX patch-up (TPAT) and completion (COM) RISC CPUs.
 * Uses the shared decompression state set up by bnx2_gunzip_init() and
 * always tears it down before returning.
 * Returns 0 on success or a negative errno from allocation or
 * decompression failure.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor.
	 * Each RISC CPU below is described by the same cpu_reg template,
	 * filled with that processor's register addresses.
	 */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Success falls through: decompression state is freed either way. */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2416
/* Move the device between PCI power states.
 * D0: clear PME status and restore normal EMAC/RPM config.
 * D3hot: optionally configure Wake-on-LAN (force 10/100 copper
 * autoneg, enable magic/ACPI packet reception, notify firmware),
 * then write the new power state to PM_CTRL.
 * Returns 0 on success, -EINVAL for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set state to D0 and clear any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the wake-up packet modes enabled for D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link can come up at a WOL-capable speed,
			 * then restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending, unless WOL is
		 * entirely unsupported on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits 3) when WOL is
		 * requested; all other chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2543
2544static int
2545bnx2_acquire_nvram_lock(struct bnx2 *bp)
2546{
2547 u32 val;
2548 int j;
2549
2550 /* Request access to the flash interface. */
2551 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2552 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2553 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2554 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2555 break;
2556
2557 udelay(5);
2558 }
2559
2560 if (j >= NVRAM_TIMEOUT_COUNT)
2561 return -EBUSY;
2562
2563 return 0;
2564}
2565
2566static int
2567bnx2_release_nvram_lock(struct bnx2 *bp)
2568{
2569 int j;
2570 u32 val;
2571
2572 /* Relinquish nvram interface. */
2573 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2574
2575 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2576 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2577 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2578 break;
2579
2580 udelay(5);
2581 }
2582
2583 if (j >= NVRAM_TIMEOUT_COUNT)
2584 return -EBUSY;
2585
2586 return 0;
2587}
2588
2589
2590static int
2591bnx2_enable_nvram_write(struct bnx2 *bp)
2592{
2593 u32 val;
2594
2595 val = REG_RD(bp, BNX2_MISC_CFG);
2596 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2597
2598 if (!bp->flash_info->buffered) {
2599 int j;
2600
2601 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2602 REG_WR(bp, BNX2_NVM_COMMAND,
2603 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2604
2605 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2606 udelay(5);
2607
2608 val = REG_RD(bp, BNX2_NVM_COMMAND);
2609 if (val & BNX2_NVM_COMMAND_DONE)
2610 break;
2611 }
2612
2613 if (j >= NVRAM_TIMEOUT_COUNT)
2614 return -EBUSY;
2615 }
2616 return 0;
2617}
2618
2619static void
2620bnx2_disable_nvram_write(struct bnx2 *bp)
2621{
2622 u32 val;
2623
2624 val = REG_RD(bp, BNX2_MISC_CFG);
2625 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2626}
2627
2628
2629static void
2630bnx2_enable_nvram_access(struct bnx2 *bp)
2631{
2632 u32 val;
2633
2634 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2635 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002636 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002637 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2638}
2639
2640static void
2641bnx2_disable_nvram_access(struct bnx2 *bp)
2642{
2643 u32 val;
2644
2645 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2646 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002647 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002648 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2649 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2650}
2651
2652static int
2653bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2654{
2655 u32 cmd;
2656 int j;
2657
2658 if (bp->flash_info->buffered)
2659 /* Buffered flash, no erase needed */
2660 return 0;
2661
2662 /* Build an erase command */
2663 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2664 BNX2_NVM_COMMAND_DOIT;
2665
2666 /* Need to clear DONE bit separately. */
2667 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2668
2669 /* Address of the NVRAM to read from. */
2670 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2671
2672 /* Issue an erase command. */
2673 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2674
2675 /* Wait for completion. */
2676 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2677 u32 val;
2678
2679 udelay(5);
2680
2681 val = REG_RD(bp, BNX2_NVM_COMMAND);
2682 if (val & BNX2_NVM_COMMAND_DONE)
2683 break;
2684 }
2685
2686 if (j >= NVRAM_TIMEOUT_COUNT)
2687 return -EBUSY;
2688
2689 return 0;
2690}
2691
/* Read one 32-bit word from NVRAM at offset into ret_val (as bytes).
 * cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST for multi-word
 * transactions.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash: translate the linear
	 * offset into the page-number/page-offset form the part expects.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* The read register holds big-endian data;
			 * convert, then hand the caller raw bytes.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2737
2738
/* Write one 32-bit word (passed as bytes in val) to NVRAM at offset.
 * cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST for multi-word
 * transactions.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash: translate the linear
	 * offset into the page-number/page-offset form the part expects.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* The write register expects big-endian data. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2782
2783static int
2784bnx2_init_nvram(struct bnx2 *bp)
2785{
2786 u32 val;
2787 int j, entry_count, rc;
2788 struct flash_spec *flash;
2789
2790 /* Determine the selected interface. */
2791 val = REG_RD(bp, BNX2_NVM_CFG1);
2792
2793 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2794
2795 rc = 0;
2796 if (val & 0x40000000) {
2797
2798 /* Flash interface has been reconfigured */
2799 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08002800 j++, flash++) {
2801 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2802 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002803 bp->flash_info = flash;
2804 break;
2805 }
2806 }
2807 }
2808 else {
Michael Chan37137702005-11-04 08:49:17 -08002809 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07002810 /* Not yet been reconfigured */
2811
Michael Chan37137702005-11-04 08:49:17 -08002812 if (val & (1 << 23))
2813 mask = FLASH_BACKUP_STRAP_MASK;
2814 else
2815 mask = FLASH_STRAP_MASK;
2816
Michael Chanb6016b72005-05-26 13:03:09 -07002817 for (j = 0, flash = &flash_table[0]; j < entry_count;
2818 j++, flash++) {
2819
Michael Chan37137702005-11-04 08:49:17 -08002820 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002821 bp->flash_info = flash;
2822
2823 /* Request access to the flash interface. */
2824 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2825 return rc;
2826
2827 /* Enable access to flash interface */
2828 bnx2_enable_nvram_access(bp);
2829
2830 /* Reconfigure the flash interface */
2831 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2832 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2833 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2834 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2835
2836 /* Disable access to flash interface */
2837 bnx2_disable_nvram_access(bp);
2838 bnx2_release_nvram_lock(bp);
2839
2840 break;
2841 }
2842 }
2843 } /* if (val & 0x40000000) */
2844
2845 if (j == entry_count) {
2846 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08002847 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08002848 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07002849 }
2850
Michael Chan1122db72006-01-23 16:11:42 -08002851 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2852 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2853 if (val)
2854 bp->flash_size = val;
2855 else
2856 bp->flash_size = bp->flash_info->total_size;
2857
Michael Chanb6016b72005-05-26 13:03:09 -07002858 return rc;
2859}
2860
/* Read buf_size bytes of NVRAM starting at offset into ret_buf,
 * handling an unaligned start (pre_len bytes of a leading dword) and
 * an unaligned length (extra bytes dropped from a trailing dword),
 * and marking the first/last dword of the transaction with the
 * FIRST/LAST command flags.  Returns 0 or a negative errno.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy out only
	 * the pre_len bytes the caller asked for.
	 */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Unaligned tail: round len32 up to a dword; the last `extra`
	 * bytes read are discarded below.
	 */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		/* Exactly one dword remains. */
		u8 buf[4];

		/* FIRST was already sent for the unaligned head above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords carry no FIRST/LAST flags. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword: copy only the bytes the caller wanted. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
2970
2971static int
2972bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2973 int buf_size)
2974{
2975 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07002976 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07002977 int rc = 0;
2978 int align_start, align_end;
2979
2980 buf = data_buf;
2981 offset32 = offset;
2982 len32 = buf_size;
2983 align_start = align_end = 0;
2984
2985 if ((align_start = (offset32 & 3))) {
2986 offset32 &= ~3;
2987 len32 += align_start;
2988 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2989 return rc;
2990 }
2991
2992 if (len32 & 3) {
2993 if ((len32 > 4) || !align_start) {
2994 align_end = 4 - (len32 & 3);
2995 len32 += align_end;
2996 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2997 end, 4))) {
2998 return rc;
2999 }
3000 }
3001 }
3002
3003 if (align_start || align_end) {
3004 buf = kmalloc(len32, GFP_KERNEL);
3005 if (buf == 0)
3006 return -ENOMEM;
3007 if (align_start) {
3008 memcpy(buf, start, 4);
3009 }
3010 if (align_end) {
3011 memcpy(buf + len32 - 4, end, 4);
3012 }
3013 memcpy(buf + align_start, data_buf, buf_size);
3014 }
3015
Michael Chanae181bc2006-05-22 16:39:20 -07003016 if (bp->flash_info->buffered == 0) {
3017 flash_buffer = kmalloc(264, GFP_KERNEL);
3018 if (flash_buffer == NULL) {
3019 rc = -ENOMEM;
3020 goto nvram_write_end;
3021 }
3022 }
3023
Michael Chanb6016b72005-05-26 13:03:09 -07003024 written = 0;
3025 while ((written < len32) && (rc == 0)) {
3026 u32 page_start, page_end, data_start, data_end;
3027 u32 addr, cmd_flags;
3028 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003029
3030 /* Find the page_start addr */
3031 page_start = offset32 + written;
3032 page_start -= (page_start % bp->flash_info->page_size);
3033 /* Find the page_end addr */
3034 page_end = page_start + bp->flash_info->page_size;
3035 /* Find the data_start addr */
3036 data_start = (written == 0) ? offset32 : page_start;
3037 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003038 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003039 (offset32 + len32) : page_end;
3040
3041 /* Request access to the flash interface. */
3042 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3043 goto nvram_write_end;
3044
3045 /* Enable access to flash interface */
3046 bnx2_enable_nvram_access(bp);
3047
3048 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3049 if (bp->flash_info->buffered == 0) {
3050 int j;
3051
3052 /* Read the whole page into the buffer
3053 * (non-buffer flash only) */
3054 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3055 if (j == (bp->flash_info->page_size - 4)) {
3056 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3057 }
3058 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003059 page_start + j,
3060 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003061 cmd_flags);
3062
3063 if (rc)
3064 goto nvram_write_end;
3065
3066 cmd_flags = 0;
3067 }
3068 }
3069
3070 /* Enable writes to flash interface (unlock write-protect) */
3071 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3072 goto nvram_write_end;
3073
3074 /* Erase the page */
3075 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3076 goto nvram_write_end;
3077
3078 /* Re-enable the write again for the actual write */
3079 bnx2_enable_nvram_write(bp);
3080
3081 /* Loop to write back the buffer data from page_start to
3082 * data_start */
3083 i = 0;
3084 if (bp->flash_info->buffered == 0) {
3085 for (addr = page_start; addr < data_start;
3086 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003087
Michael Chanb6016b72005-05-26 13:03:09 -07003088 rc = bnx2_nvram_write_dword(bp, addr,
3089 &flash_buffer[i], cmd_flags);
3090
3091 if (rc != 0)
3092 goto nvram_write_end;
3093
3094 cmd_flags = 0;
3095 }
3096 }
3097
3098 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003099 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003100 if ((addr == page_end - 4) ||
3101 ((bp->flash_info->buffered) &&
3102 (addr == data_end - 4))) {
3103
3104 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3105 }
3106 rc = bnx2_nvram_write_dword(bp, addr, buf,
3107 cmd_flags);
3108
3109 if (rc != 0)
3110 goto nvram_write_end;
3111
3112 cmd_flags = 0;
3113 buf += 4;
3114 }
3115
3116 /* Loop to write back the buffer data from data_end
3117 * to page_end */
3118 if (bp->flash_info->buffered == 0) {
3119 for (addr = data_end; addr < page_end;
3120 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003121
Michael Chanb6016b72005-05-26 13:03:09 -07003122 if (addr == page_end-4) {
3123 cmd_flags = BNX2_NVM_COMMAND_LAST;
3124 }
3125 rc = bnx2_nvram_write_dword(bp, addr,
3126 &flash_buffer[i], cmd_flags);
3127
3128 if (rc != 0)
3129 goto nvram_write_end;
3130
3131 cmd_flags = 0;
3132 }
3133 }
3134
3135 /* Disable writes to flash interface (lock write-protect) */
3136 bnx2_disable_nvram_write(bp);
3137
3138 /* Disable access to flash interface */
3139 bnx2_disable_nvram_access(bp);
3140 bnx2_release_nvram_lock(bp);
3141
3142 /* Increment written */
3143 written += data_end - data_start;
3144 }
3145
3146nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003147 if (bp->flash_info->buffered == 0)
3148 kfree(flash_buffer);
3149
Michael Chanb6016b72005-05-26 13:03:09 -07003150 if (align_start || align_end)
3151 kfree(buf);
3152 return rc;
3153}
3154
/* Perform a core reset of the chip.
 *
 * Quiesces DMA and host coalescing, performs the pre-reset firmware
 * handshake (WAIT0 | @reset_code), writes a driver reset signature into
 * shared memory so the firmware treats this as a soft reset, triggers
 * the reset through BNX2_PCICFG_MISC_CONFIG and polls for completion,
 * then waits for the firmware to finish re-initializing (WAIT1).
 * 5706 A0 parts get two extra workarounds at the end.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if the chip comes back with wrong byte-swap configuration, or an
 * error from bnx2_fw_sync()/bnx2_alloc_bad_rbuf().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 need extra settling time before the completion
	 * bits can be polled. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3233
/* Bring the chip from post-reset state to fully operational.
 *
 * Configures DMA byte ordering and PCI-X-specific settings, enables
 * context memory and loads firmware into the on-chip CPUs, programs
 * the MAC address, MTU, status/statistics block DMA addresses and
 * host-coalescing parameters, sets up the receive filter, then tells
 * the firmware initialization is complete (WAIT2) and enables the
 * remaining blocks.
 *
 * Returns 0 on success, or an error from bnx2_init_cpus() /
 * bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X operation.
	 * NOTE(review): exact bit semantics come from chip docs. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single DMA on the TX side. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip points (upper 16 bits = during-interrupt
	 * value, lower 16 bits = normal value). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 cannot use the timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware is running ASF management. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks and flush the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW pokes. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3395
3396
/* Set up the TX ring: chain the last BD back to the start of the ring,
 * reset the software producer/consumer state, and program the TX
 * context in the chip (context type, command type, and BD ring base
 * address). */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* Threshold (half the ring) used by the TX path to decide when
	 * to wake the queue. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD points back to the start of the ring, forming a
	 * circular chain. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Tell the chip where the TX BD ring lives. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3429
/* Set up the RX ring(s): compute buffer sizes from the current MTU,
 * initialize every BD and link the BD pages into a circular chain,
 * program the RX context with the ring base address, pre-fill the ring
 * with receive SKBs, and publish the initial producer index and byte
 * sequence to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Initialize each BD page; the last BD of each page points at
	 * the next page, wrapping back to page 0 from the final page. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
			0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Tell the chip where the first RX BD page lives. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with receive SKBs; stop early if an
	 * allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3489
3490static void
Michael Chan13daffa2006-03-20 17:49:20 -08003491bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3492{
3493 u32 num_rings, max;
3494
3495 bp->rx_ring_size = size;
3496 num_rings = 1;
3497 while (size > MAX_RX_DESC_CNT) {
3498 size -= MAX_RX_DESC_CNT;
3499 num_rings++;
3500 }
3501 /* round to next power of 2 */
3502 max = MAX_RX_RINGS;
3503 while ((max & num_rings) == 0)
3504 max >>= 1;
3505
3506 if (num_rings != max)
3507 max <<= 1;
3508
3509 bp->rx_max_ring = max;
3510 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3511}
3512
/* Unmap and free every SKB still queued on the TX ring.
 *
 * A transmitted SKB occupies one BD for its linear data followed by one
 * BD per page fragment, so after freeing an SKB the walk advances the
 * index by nr_frags + 1.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear portion of the SKB. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the following BDs. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head BD and all fragment BDs. */
		i += j + 1;
	}

}
3549
3550static void
3551bnx2_free_rx_skbs(struct bnx2 *bp)
3552{
3553 int i;
3554
3555 if (bp->rx_buf_ring == NULL)
3556 return;
3557
Michael Chan13daffa2006-03-20 17:49:20 -08003558 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003559 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3560 struct sk_buff *skb = rx_buf->skb;
3561
Michael Chan05d0f1c2005-11-04 08:53:48 -08003562 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003563 continue;
3564
3565 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3566 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3567
3568 rx_buf->skb = NULL;
3569
Michael Chan745720e2006-06-29 12:37:41 -07003570 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003571 }
3572}
3573
/* Release all SKBs held by the TX and RX rings (used by the reset
 * path; see bnx2_reset_nic()). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3580
3581static int
3582bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3583{
3584 int rc;
3585
3586 rc = bnx2_reset_chip(bp, reset_code);
3587 bnx2_free_skbs(bp);
3588 if (rc)
3589 return rc;
3590
Michael Chanfba9fe92006-06-12 22:21:25 -07003591 if ((rc = bnx2_init_chip(bp)) != 0)
3592 return rc;
3593
Michael Chanb6016b72005-05-26 13:03:09 -07003594 bnx2_init_tx_ring(bp);
3595 bnx2_init_rx_ring(bp);
3596 return 0;
3597}
3598
3599static int
3600bnx2_init_nic(struct bnx2 *bp)
3601{
3602 int rc;
3603
3604 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3605 return rc;
3606
Michael Chan80be4432006-11-19 14:07:28 -08003607 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003608 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003609 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003610 bnx2_set_link(bp);
3611 return 0;
3612}
3613
/* Register self-test.
 *
 * For each entry in reg_tbl, writes 0 and then 0xffffffff to the
 * register and checks that the read/write bits (rw_mask) take the
 * written value while the read-only bits (ro_mask) keep their original
 * value.  The original register contents are restored in all cases.
 *
 * Returns 0 if every register passes, -ENODEV on the first failure.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* { register offset, flags, read/write bit mask, read-only bit mask } */
	static const struct {
		u16 offset;
		u16 flags;
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* end-of-table sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Preserve the original value for restore. */
		save_val = readl(bp->regview + offset);

		/* Write all zeros: r/w bits must read back 0, r/o bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: r/w bits must read back 1, r/o bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3776
3777static int
3778bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3779{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003780 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003781 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3782 int i;
3783
3784 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3785 u32 offset;
3786
3787 for (offset = 0; offset < size; offset += 4) {
3788
3789 REG_WR_IND(bp, start + offset, test_pattern[i]);
3790
3791 if (REG_RD_IND(bp, start + offset) !=
3792 test_pattern[i]) {
3793 return -ENODEV;
3794 }
3795 }
3796 }
3797 return 0;
3798}
3799
3800static int
3801bnx2_test_memory(struct bnx2 *bp)
3802{
3803 int ret = 0;
3804 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003805 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003806 u32 offset;
3807 u32 len;
3808 } mem_tbl[] = {
3809 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003810 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003811 { 0xe0000, 0x4000 },
3812 { 0x120000, 0x4000 },
3813 { 0x1a0000, 0x4000 },
3814 { 0x160000, 0x4000 },
3815 { 0xffffffff, 0 },
3816 };
3817
3818 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3819 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3820 mem_tbl[i].len)) != 0) {
3821 return ret;
3822 }
3823 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003824
Michael Chanb6016b72005-05-26 13:03:09 -07003825 return ret;
3826}
3827
Michael Chanbc5a0692006-01-23 16:13:22 -08003828#define BNX2_MAC_LOOPBACK 0
3829#define BNX2_PHY_LOOPBACK 1
3830
/* Transmit one self-addressed test frame in MAC or PHY loopback mode
 * and verify that it comes back intact on the RX ring.
 *
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test SKB cannot be allocated, or -ENODEV if the frame is not
 * transmitted, not received, or received corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-size frame addressed to our own MAC with a
	 * recognizable byte pattern in the payload. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so we have a stable starting RX
	 * consumer index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the single frame on the TX ring and ring the doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the TX
	 * completion and the looped-back RX frame. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX consumer must show our frame was consumed. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts new frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip places an l2_fhdr in front of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if any receive error bits are set. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Verify the length (minus 4-byte CRC) and the payload pattern. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3949
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED | \
					 BNX2_PHY_LOOPBACK_FAILED)

/* Run the MAC-internal and PHY-internal loopback self-tests.
 * Returns 0 on success, otherwise a bitmask of the
 * BNX2_*_LOOPBACK_FAILED flags above.  The chip is reset and the PHY
 * re-initialized first, so normal traffic is disrupted; requires the
 * interface to be running.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	/* Bring chip and PHY to a known state before looping packets back. */
	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
3973
Michael Chanb6016b72005-05-26 13:03:09 -07003974#define NVRAM_SIZE 0x200
3975#define CRC32_RESIDUAL 0xdebb20e3
3976
3977static int
3978bnx2_test_nvram(struct bnx2 *bp)
3979{
3980 u32 buf[NVRAM_SIZE / 4];
3981 u8 *data = (u8 *) buf;
3982 int rc = 0;
3983 u32 magic, csum;
3984
3985 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3986 goto test_nvram_done;
3987
3988 magic = be32_to_cpu(buf[0]);
3989 if (magic != 0x669955aa) {
3990 rc = -ENODEV;
3991 goto test_nvram_done;
3992 }
3993
3994 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3995 goto test_nvram_done;
3996
3997 csum = ether_crc_le(0x100, data);
3998 if (csum != CRC32_RESIDUAL) {
3999 rc = -ENODEV;
4000 goto test_nvram_done;
4001 }
4002
4003 csum = ether_crc_le(0x100, data + 0x100);
4004 if (csum != CRC32_RESIDUAL) {
4005 rc = -ENODEV;
4006 }
4007
4008test_nvram_done:
4009 return rc;
4010}
4011
4012static int
4013bnx2_test_link(struct bnx2 *bp)
4014{
4015 u32 bmsr;
4016
Michael Chanc770a652005-08-25 15:38:39 -07004017 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004018 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4019 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004020 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004021
Michael Chanb6016b72005-05-26 13:03:09 -07004022 if (bmsr & BMSR_LSTATUS) {
4023 return 0;
4024 }
4025 return -ENODEV;
4026}
4027
/* Interrupt self-test: force the host coalescing block to raise an
 * interrupt and poll up to ~100ms for the status-block index in
 * BNX2_PCICFG_INT_ACK_CMD to change.  Returns 0 if an interrupt was
 * observed, -ENODEV otherwise.  Used by bnx2_open() to validate MSI.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the posted write */

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4057
/* Periodic SerDes link maintenance for the 5706 (called from
 * bnx2_timer).  Implements parallel detection: if autoneg is enabled
 * but the link stays down while a signal is detected and the partner
 * sends no autoneg CONFIG, force 1000/full; once the partner starts
 * sending CONFIG again, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Shadow/expansion register accesses below are
			 * chip-specific; 0x1c selects a shadow register,
			 * 0x17/0x15 are the expansion address/data pair.
			 * NOTE(review): register meanings taken from the
			 * original comments — confirm against Broadcom docs.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner isn't autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner resumed sending CONFIG: go back to
			 * autonegotiation.
			 */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4112
/* Periodic SerDes link maintenance for the 5708 (called from
 * bnx2_timer).  On 2.5G-capable parts, alternate between forcing
 * 2.5G full duplex and re-enabling autoneg while the link is down,
 * so either kind of link partner can eventually be matched.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		/* Nothing to alternate on non-2.5G parts. */
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't brought the link up: try forcing
			 * 2.5G for one (shorter) timer interval.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't work either: back to autoneg
			 * and give it two timer ticks before re-evaluating.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4147
/* Driver heartbeat timer.  Sends the periodic driver-alive pulse to
 * the bootcode, refreshes the firmware rx-drop counter, and runs the
 * per-chip SerDes link maintenance.  Re-arms itself with
 * bp->current_interval (which the serdes helpers may shorten).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the body while interrupts are soft-disabled (reset in
	 * progress), but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to the firmware so it knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4175
/* Called with rtnl_lock */
/* net_device open: power up, allocate rings, request the IRQ
 * (preferring MSI on chips where it works), initialize the NIC, start
 * the heartbeat timer and enable interrupts.  If MSI was enabled, an
 * interrupt self-test is run and on failure the driver falls back to
 * legacy INTx, re-initializing the NIC.  Returns 0 or a negative errno
 * with everything it acquired released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is known-broken on 5706 A0/A1; otherwise try it unless the
	 * user disabled it via the module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				 dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init: the chip must be reprogrammed for INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4271
/* Work-queue handler (scheduled from bnx2_tx_timeout) that resets and
 * re-initializes the chip in process context.  in_reset_task is set
 * around the body so bnx2_close() can wait for a reset in flight
 * instead of flushing the workqueue (which could deadlock on rtnl).
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold interrupts off until bnx2_netif_start re-enables them. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4289
4290static void
4291bnx2_tx_timeout(struct net_device *dev)
4292{
Michael Chan972ec0d2006-01-23 16:12:43 -08004293 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004294
4295 /* This allows the netif to be shutdown gracefully before resetting */
4296 schedule_work(&bp->reset_task);
4297}
4298
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration: attach (or detach, vlgrp == NULL) the VLAN group
 * and reprogram the RX mode while traffic is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4313
/* Called with rtnl_lock */
/* VLAN acceleration: remove one VLAN id from the group and reprogram
 * the RX mode while traffic is quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
4329
Herbert Xu932ff272006-06-09 12:20:56 -07004330/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004331 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4332 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004333 */
4334static int
4335bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4336{
Michael Chan972ec0d2006-01-23 16:12:43 -08004337 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004338 dma_addr_t mapping;
4339 struct tx_bd *txbd;
4340 struct sw_bd *tx_buf;
4341 u32 len, vlan_tag_flags, last_frag, mss;
4342 u16 prod, ring_prod;
4343 int i;
4344
Michael Chane89bbf12005-08-25 15:36:58 -07004345 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004346 netif_stop_queue(dev);
4347 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4348 dev->name);
4349
4350 return NETDEV_TX_BUSY;
4351 }
4352 len = skb_headlen(skb);
4353 prod = bp->tx_prod;
4354 ring_prod = TX_RING_IDX(prod);
4355
4356 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004357 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004358 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4359 }
4360
4361 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4362 vlan_tag_flags |=
4363 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4364 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004365#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004366 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004367 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4368 u32 tcp_opt_len, ip_tcp_len;
4369
4370 if (skb_header_cloned(skb) &&
4371 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4372 dev_kfree_skb(skb);
4373 return NETDEV_TX_OK;
4374 }
4375
4376 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4377 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4378
4379 tcp_opt_len = 0;
4380 if (skb->h.th->doff > 5) {
4381 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4382 }
4383 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4384
4385 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004386 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004387 skb->h.th->check =
4388 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4389 skb->nh.iph->daddr,
4390 0, IPPROTO_TCP, 0);
4391
4392 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4393 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4394 (tcp_opt_len >> 2)) << 8;
4395 }
4396 }
4397 else
4398#endif
4399 {
4400 mss = 0;
4401 }
4402
4403 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004404
Michael Chanb6016b72005-05-26 13:03:09 -07004405 tx_buf = &bp->tx_buf_ring[ring_prod];
4406 tx_buf->skb = skb;
4407 pci_unmap_addr_set(tx_buf, mapping, mapping);
4408
4409 txbd = &bp->tx_desc_ring[ring_prod];
4410
4411 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4412 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4413 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4414 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4415
4416 last_frag = skb_shinfo(skb)->nr_frags;
4417
4418 for (i = 0; i < last_frag; i++) {
4419 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4420
4421 prod = NEXT_TX_BD(prod);
4422 ring_prod = TX_RING_IDX(prod);
4423 txbd = &bp->tx_desc_ring[ring_prod];
4424
4425 len = frag->size;
4426 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4427 len, PCI_DMA_TODEVICE);
4428 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4429 mapping, mapping);
4430
4431 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4432 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4433 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4434 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4435
4436 }
4437 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4438
4439 prod = NEXT_TX_BD(prod);
4440 bp->tx_prod_bseq += skb->len;
4441
Michael Chanb6016b72005-05-26 13:03:09 -07004442 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4443 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4444
4445 mmiowb();
4446
4447 bp->tx_prod = prod;
4448 dev->trans_start = jiffies;
4449
Michael Chane89bbf12005-08-25 15:36:58 -07004450 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004451 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004452 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004453 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004454 }
4455
4456 return NETDEV_TX_OK;
4457}
4458
/* Called with rtnl_lock */
/* net_device stop: wait for any in-flight reset task, quiesce traffic,
 * tell the bootcode why we are going down (picks the right WoL reset
 * code), release the IRQ/MSI and all ring memory, then drop the chip
 * to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4494
/* Fold a 64-bit hardware counter (exposed as a ctr_hi/ctr_lo pair)
 * into an unsigned long.  The whole expansion is parenthesized so the
 * macro behaves as a single expression in any arithmetic context
 * (the old form mis-associated in e.g. "2 * GET_NET_STATS64(x)").
 * On 32-bit hosts only the low word is used: unsigned long cannot hold
 * the full value, and a 32-bit shift by 32 would be undefined.
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)	\
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4507
/* net_device get_stats: translate the chip's hardware statistics block
 * into struct net_device_stats.  64-bit counters are folded via
 * GET_NET_STATS (low word only on 32-bit hosts).  If the stats block
 * has not been allocated yet, the (zeroed) cached stats are returned.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report carrier sense errors. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4583
4584/* All ethtool functions called with rtnl_lock */
4585
4586static int
4587bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4588{
Michael Chan972ec0d2006-01-23 16:12:43 -08004589 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004590
4591 cmd->supported = SUPPORTED_Autoneg;
4592 if (bp->phy_flags & PHY_SERDES_FLAG) {
4593 cmd->supported |= SUPPORTED_1000baseT_Full |
4594 SUPPORTED_FIBRE;
4595
4596 cmd->port = PORT_FIBRE;
4597 }
4598 else {
4599 cmd->supported |= SUPPORTED_10baseT_Half |
4600 SUPPORTED_10baseT_Full |
4601 SUPPORTED_100baseT_Half |
4602 SUPPORTED_100baseT_Full |
4603 SUPPORTED_1000baseT_Full |
4604 SUPPORTED_TP;
4605
4606 cmd->port = PORT_TP;
4607 }
4608
4609 cmd->advertising = bp->advertising;
4610
4611 if (bp->autoneg & AUTONEG_SPEED) {
4612 cmd->autoneg = AUTONEG_ENABLE;
4613 }
4614 else {
4615 cmd->autoneg = AUTONEG_DISABLE;
4616 }
4617
4618 if (netif_carrier_ok(dev)) {
4619 cmd->speed = bp->line_speed;
4620 cmd->duplex = bp->duplex;
4621 }
4622 else {
4623 cmd->speed = -1;
4624 cmd->duplex = -1;
4625 }
4626
4627 cmd->transceiver = XCVR_INTERNAL;
4628 cmd->phy_address = bp->phy_addr;
4629
4630 return 0;
4631}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004632
/* ethtool set_settings: validate and apply a new autoneg/speed/duplex
 * configuration, then reprogram the PHY.  All candidate values are
 * staged in locals so nothing in *bp changes if validation fails with
 * -EINVAL.  Returns 0 on success.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are meaningless on a fibre SerDes. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the medium
			 * supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* Forced mode on SerDes: only 1000/full, or
			 * 2500/full on 2.5G-capable parts.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 is not supported on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4708
/* ethtool get_drvinfo: driver name/version, PCI bus id, and the
 * bootcode version formatted as "X.Y.Z" from the three version bytes
 * packed in bp->fw_ver.
 * NOTE(review): the '+ '0'' conversion assumes each version component
 * is a single decimal digit (< 10) — confirm against the firmware's
 * versioning scheme.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
4723
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: size of the buffer bnx2_get_regs() fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4731
4732static void
4733bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4734{
4735 u32 *p = _p, i, offset;
4736 u8 *orig_p = _p;
4737 struct bnx2 *bp = netdev_priv(dev);
4738 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4739 0x0800, 0x0880, 0x0c00, 0x0c10,
4740 0x0c30, 0x0d08, 0x1000, 0x101c,
4741 0x1040, 0x1048, 0x1080, 0x10a4,
4742 0x1400, 0x1490, 0x1498, 0x14f0,
4743 0x1500, 0x155c, 0x1580, 0x15dc,
4744 0x1600, 0x1658, 0x1680, 0x16d8,
4745 0x1800, 0x1820, 0x1840, 0x1854,
4746 0x1880, 0x1894, 0x1900, 0x1984,
4747 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4748 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4749 0x2000, 0x2030, 0x23c0, 0x2400,
4750 0x2800, 0x2820, 0x2830, 0x2850,
4751 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4752 0x3c00, 0x3c94, 0x4000, 0x4010,
4753 0x4080, 0x4090, 0x43c0, 0x4458,
4754 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4755 0x4fc0, 0x5010, 0x53c0, 0x5444,
4756 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4757 0x5fc0, 0x6000, 0x6400, 0x6428,
4758 0x6800, 0x6848, 0x684c, 0x6860,
4759 0x6888, 0x6910, 0x8000 };
4760
4761 regs->version = 0;
4762
4763 memset(p, 0, BNX2_REGDUMP_LEN);
4764
4765 if (!netif_running(bp->dev))
4766 return;
4767
4768 i = 0;
4769 offset = reg_boundaries[0];
4770 p += offset;
4771 while (offset < BNX2_REGDUMP_LEN) {
4772 *p++ = REG_RD(bp, offset);
4773 offset += 4;
4774 if (offset == reg_boundaries[i + 1]) {
4775 offset = reg_boundaries[i + 2];
4776 p = (u32 *) (orig_p + offset);
4777 i += 2;
4778 }
4779 }
4780}
4781
Michael Chanb6016b72005-05-26 13:03:09 -07004782static void
4783bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4784{
Michael Chan972ec0d2006-01-23 16:12:43 -08004785 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004786
4787 if (bp->flags & NO_WOL_FLAG) {
4788 wol->supported = 0;
4789 wol->wolopts = 0;
4790 }
4791 else {
4792 wol->supported = WAKE_MAGIC;
4793 if (bp->wol)
4794 wol->wolopts = WAKE_MAGIC;
4795 else
4796 wol->wolopts = 0;
4797 }
4798 memset(&wol->sopass, 0, sizeof(wol->sopass));
4799}
4800
4801static int
4802bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4803{
Michael Chan972ec0d2006-01-23 16:12:43 -08004804 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004805
4806 if (wol->wolopts & ~WAKE_MAGIC)
4807 return -EINVAL;
4808
4809 if (wol->wolopts & WAKE_MAGIC) {
4810 if (bp->flags & NO_WOL_FLAG)
4811 return -EINVAL;
4812
4813 bp->wol = 1;
4814 }
4815 else {
4816 bp->wol = 0;
4817 }
4818 return 0;
4819}
4820
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  On SerDes the PHY is first put in loopback for
 * 20ms to force a link-down event the partner can see, and the
 * heartbeat timer is re-armed to drive the SerDes AN state machine.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; msleep cannot be called
		 * while holding a BH-disabling spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4855
4856static int
4857bnx2_get_eeprom_len(struct net_device *dev)
4858{
Michael Chan972ec0d2006-01-23 16:12:43 -08004859 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004860
Michael Chan1122db72006-01-23 16:11:42 -08004861 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004862 return 0;
4863
Michael Chan1122db72006-01-23 16:11:42 -08004864 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004865}
4866
4867static int
4868bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4869 u8 *eebuf)
4870{
Michael Chan972ec0d2006-01-23 16:12:43 -08004871 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004872 int rc;
4873
John W. Linville1064e942005-11-10 12:58:24 -08004874 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004875
4876 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4877
4878 return rc;
4879}
4880
4881static int
4882bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4883 u8 *eebuf)
4884{
Michael Chan972ec0d2006-01-23 16:12:43 -08004885 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004886 int rc;
4887
John W. Linville1064e942005-11-10 12:58:24 -08004888 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004889
4890 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4891
4892 return rc;
4893}
4894
4895static int
4896bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4897{
Michael Chan972ec0d2006-01-23 16:12:43 -08004898 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004899
4900 memset(coal, 0, sizeof(struct ethtool_coalesce));
4901
4902 coal->rx_coalesce_usecs = bp->rx_ticks;
4903 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4904 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4905 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4906
4907 coal->tx_coalesce_usecs = bp->tx_ticks;
4908 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4909 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4910 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4911
4912 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4913
4914 return 0;
4915}
4916
4917static int
4918bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4919{
Michael Chan972ec0d2006-01-23 16:12:43 -08004920 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004921
4922 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4923 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4924
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004925 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07004926 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4927
4928 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4929 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4930
4931 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4932 if (bp->rx_quick_cons_trip_int > 0xff)
4933 bp->rx_quick_cons_trip_int = 0xff;
4934
4935 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4936 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4937
4938 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4939 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4940
4941 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4942 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4943
4944 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4945 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4946 0xff;
4947
4948 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4949 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4950 bp->stats_ticks &= 0xffff00;
4951
4952 if (netif_running(bp->dev)) {
4953 bnx2_netif_stop(bp);
4954 bnx2_init_nic(bp);
4955 bnx2_netif_start(bp);
4956 }
4957
4958 return 0;
4959}
4960
4961static void
4962bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4963{
Michael Chan972ec0d2006-01-23 16:12:43 -08004964 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004965
Michael Chan13daffa2006-03-20 17:49:20 -08004966 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07004967 ering->rx_mini_max_pending = 0;
4968 ering->rx_jumbo_max_pending = 0;
4969
4970 ering->rx_pending = bp->rx_ring_size;
4971 ering->rx_mini_pending = 0;
4972 ering->rx_jumbo_pending = 0;
4973
4974 ering->tx_max_pending = MAX_TX_DESC_CNT;
4975 ering->tx_pending = bp->tx_ring_size;
4976}
4977
/* ethtool set_ringparam: resize the rx/tx rings.
 *
 * tx_pending must exceed MAX_SKB_FRAGS so a maximally-fragmented skb
 * always fits in the ring.  If the interface is up, the chip is reset
 * and all ring memory is freed before the new sizes are recorded, then
 * reallocated and the NIC re-initialized.
 *
 * NOTE(review): if bnx2_alloc_mem() fails here, the function returns
 * with the device stopped and its memory freed — the interface is left
 * non-functional until brought down and up again.  Presumably accepted
 * behavior for an OOM on reconfigure; confirm against later fixes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* quiesce and tear down before touching ring geometry */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* record new sizes; rx helper also derives page counts etc. */
	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5011
5012static void
5013bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5014{
Michael Chan972ec0d2006-01-23 16:12:43 -08005015 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005016
5017 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5018 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5019 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5020}
5021
5022static int
5023bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5024{
Michael Chan972ec0d2006-01-23 16:12:43 -08005025 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005026
5027 bp->req_flow_ctrl = 0;
5028 if (epause->rx_pause)
5029 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5030 if (epause->tx_pause)
5031 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5032
5033 if (epause->autoneg) {
5034 bp->autoneg |= AUTONEG_FLOW_CTRL;
5035 }
5036 else {
5037 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5038 }
5039
Michael Chanc770a652005-08-25 15:38:39 -07005040 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005041
5042 bnx2_setup_phy(bp);
5043
Michael Chanc770a652005-08-25 15:38:39 -07005044 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005045
5046 return 0;
5047}
5048
5049static u32
5050bnx2_get_rx_csum(struct net_device *dev)
5051{
Michael Chan972ec0d2006-01-23 16:12:43 -08005052 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005053
5054 return bp->rx_csum;
5055}
5056
5057static int
5058bnx2_set_rx_csum(struct net_device *dev, u32 data)
5059{
Michael Chan972ec0d2006-01-23 16:12:43 -08005060 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005061
5062 bp->rx_csum = data;
5063 return 0;
5064}
5065
Michael Chanb11d6212006-06-29 12:31:21 -07005066static int
5067bnx2_set_tso(struct net_device *dev, u32 data)
5068{
5069 if (data)
5070 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5071 else
5072 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5073 return 0;
5074}
5075
/* Number of ethtool statistics exported; must match the three parallel
 * tables below (names, hardware offsets, counter widths).
 */
#define BNX2_NUM_STATS 46

/* ethtool ETH_SS_STATS name table, indexed in lockstep with
 * bnx2_stats_offset_arr and the per-chip stats_len arrays.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5128
/* Convert a statistics_block field to its 32-bit-word index within the
 * DMA'd stats block (hw_stats is walked as a u32 array).
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-statistic word offset into the hardware stats block; indexed in
 * lockstep with bnx2_stats_str_arr.  64-bit counters point at their
 * _hi word, with the _lo word assumed adjacent.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5179
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8, 4, or 0 == skip) for each statistic on
 * 5706 A0-A2 and 5708 A0 chips; indexed like bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5190
/* Counter widths for later chips: same table but with no skipped
 * (zero-width) entries apart from stat_IfHCInBadOctets.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5198
/* Number of ethtool self-tests; buf[] indices in bnx2_self_test()
 * correspond to this table's order.
 */
#define BNX2_NUM_TESTS 6

/* ethtool ETH_SS_TEST name table. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5211
5212static int
5213bnx2_self_test_count(struct net_device *dev)
5214{
5215 return BNX2_NUM_TESTS;
5216}
5217
/* ethtool self-test handler.
 *
 * Offline tests (register, memory, loopback) take the NIC down, put the
 * chip in diagnostic mode and free all rx/tx buffers first; afterwards
 * the chip is either reset (interface down) or fully re-initialized and
 * restarted (interface up).  Online tests (NVRAM, interrupt, link) run
 * against the live device.  A nonzero buf[i] flags the i-th test in
 * bnx2_tests_str_arr as failed, mirrored by ETH_TEST_FL_FAILED.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* quiesce and enter diag mode before disruptive tests */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback returns a bitmask of failed loopback modes */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7s) so the online link test
		 * below doesn't report a spurious failure
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5273
5274static void
5275bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5276{
5277 switch (stringset) {
5278 case ETH_SS_STATS:
5279 memcpy(buf, bnx2_stats_str_arr,
5280 sizeof(bnx2_stats_str_arr));
5281 break;
5282 case ETH_SS_TEST:
5283 memcpy(buf, bnx2_tests_str_arr,
5284 sizeof(bnx2_tests_str_arr));
5285 break;
5286 }
5287}
5288
5289static int
5290bnx2_get_stats_count(struct net_device *dev)
5291{
5292 return BNX2_NUM_STATS;
5293}
5294
5295static void
5296bnx2_get_ethtool_stats(struct net_device *dev,
5297 struct ethtool_stats *stats, u64 *buf)
5298{
Michael Chan972ec0d2006-01-23 16:12:43 -08005299 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005300 int i;
5301 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005302 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005303
5304 if (hw_stats == NULL) {
5305 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5306 return;
5307 }
5308
Michael Chan5b0c76a2005-11-04 08:45:49 -08005309 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5310 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5311 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5312 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005313 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005314 else
5315 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005316
5317 for (i = 0; i < BNX2_NUM_STATS; i++) {
5318 if (stats_len_arr[i] == 0) {
5319 /* skip this counter */
5320 buf[i] = 0;
5321 continue;
5322 }
5323 if (stats_len_arr[i] == 4) {
5324 /* 4-byte counter */
5325 buf[i] = (u64)
5326 *(hw_stats + bnx2_stats_offset_arr[i]);
5327 continue;
5328 }
5329 /* 8-byte counter */
5330 buf[i] = (((u64) *(hw_stats +
5331 bnx2_stats_offset_arr[i])) << 32) +
5332 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5333 }
5334}
5335
5336static int
5337bnx2_phys_id(struct net_device *dev, u32 data)
5338{
Michael Chan972ec0d2006-01-23 16:12:43 -08005339 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005340 int i;
5341 u32 save;
5342
5343 if (data == 0)
5344 data = 2;
5345
5346 save = REG_RD(bp, BNX2_MISC_CFG);
5347 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5348
5349 for (i = 0; i < (data * 2); i++) {
5350 if ((i % 2) == 0) {
5351 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5352 }
5353 else {
5354 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5355 BNX2_EMAC_LED_1000MB_OVERRIDE |
5356 BNX2_EMAC_LED_100MB_OVERRIDE |
5357 BNX2_EMAC_LED_10MB_OVERRIDE |
5358 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5359 BNX2_EMAC_LED_TRAFFIC);
5360 }
5361 msleep_interruptible(500);
5362 if (signal_pending(current))
5363 break;
5364 }
5365 REG_WR(bp, BNX2_EMAC_LED, 0);
5366 REG_WR(bp, BNX2_MISC_CFG, save);
5367 return 0;
5368}
5369
/* ethtool operations table; TSO entries are compiled in only when the
 * kernel provides NETIF_F_TSO (BCM_TSO set at the top of the file).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5407
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register accesses are serialized with the rest of the driver via
 * phy_lock.  SIOCSMIIREG requires CAP_NET_ADMIN.  Anything else gets
 * -EOPNOTSUPP.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5449
5450/* Called with rtnl_lock */
5451static int
5452bnx2_change_mac_addr(struct net_device *dev, void *p)
5453{
5454 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005455 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005456
Michael Chan73eef4c2005-08-25 15:39:15 -07005457 if (!is_valid_ether_addr(addr->sa_data))
5458 return -EINVAL;
5459
Michael Chanb6016b72005-05-26 13:03:09 -07005460 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5461 if (netif_running(dev))
5462 bnx2_set_mac_addr(bp);
5463
5464 return 0;
5465}
5466
5467/* Called with rtnl_lock */
5468static int
5469bnx2_change_mtu(struct net_device *dev, int new_mtu)
5470{
Michael Chan972ec0d2006-01-23 16:12:43 -08005471 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005472
5473 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5474 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5475 return -EINVAL;
5476
5477 dev->mtu = new_mtu;
5478 if (netif_running(dev)) {
5479 bnx2_netif_stop(bp);
5480
5481 bnx2_init_nic(bp);
5482
5483 bnx2_netif_start(bp);
5484 }
5485 return 0;
5486}
5487
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the device by calling the ISR directly with
 * the real interrupt line masked, so netconsole/netdump can make
 * progress in contexts where interrupts are unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5499
5500static int __devinit
5501bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5502{
5503 struct bnx2 *bp;
5504 unsigned long mem_len;
5505 int rc;
5506 u32 reg;
5507
5508 SET_MODULE_OWNER(dev);
5509 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005510 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005511
5512 bp->flags = 0;
5513 bp->phy_flags = 0;
5514
5515 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5516 rc = pci_enable_device(pdev);
5517 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005518 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005519 goto err_out;
5520 }
5521
5522 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005523 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005524 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005525 rc = -ENODEV;
5526 goto err_out_disable;
5527 }
5528
5529 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5530 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005531 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005532 goto err_out_disable;
5533 }
5534
5535 pci_set_master(pdev);
5536
5537 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5538 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005539 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005540 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005541 rc = -EIO;
5542 goto err_out_release;
5543 }
5544
5545 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5546 if (bp->pcix_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005547 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005548 rc = -EIO;
5549 goto err_out_release;
5550 }
5551
5552 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5553 bp->flags |= USING_DAC_FLAG;
5554 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005555 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005556 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005557 rc = -EIO;
5558 goto err_out_release;
5559 }
5560 }
5561 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005562 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005563 rc = -EIO;
5564 goto err_out_release;
5565 }
5566
5567 bp->dev = dev;
5568 bp->pdev = pdev;
5569
5570 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005571 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5572
5573 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5574 mem_len = MB_GET_CID_ADDR(17);
5575 dev->mem_end = dev->mem_start + mem_len;
5576 dev->irq = pdev->irq;
5577
5578 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5579
5580 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005581 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005582 rc = -ENOMEM;
5583 goto err_out_release;
5584 }
5585
5586 /* Configure byte swap and enable write to the reg_window registers.
5587 * Rely on CPU to do target byte swapping on big endian systems
5588 * The chip's target access swapping will not swap all accesses
5589 */
5590 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5591 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5592 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5593
Pavel Machek829ca9a2005-09-03 15:56:56 -07005594 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005595
5596 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5597
Michael Chanb6016b72005-05-26 13:03:09 -07005598 /* Get bus information. */
5599 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5600 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5601 u32 clkreg;
5602
5603 bp->flags |= PCIX_FLAG;
5604
5605 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005606
Michael Chanb6016b72005-05-26 13:03:09 -07005607 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5608 switch (clkreg) {
5609 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5610 bp->bus_speed_mhz = 133;
5611 break;
5612
5613 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5614 bp->bus_speed_mhz = 100;
5615 break;
5616
5617 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5618 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5619 bp->bus_speed_mhz = 66;
5620 break;
5621
5622 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5623 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5624 bp->bus_speed_mhz = 50;
5625 break;
5626
5627 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5628 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5629 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5630 bp->bus_speed_mhz = 33;
5631 break;
5632 }
5633 }
5634 else {
5635 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5636 bp->bus_speed_mhz = 66;
5637 else
5638 bp->bus_speed_mhz = 33;
5639 }
5640
5641 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5642 bp->flags |= PCI_32BIT_FLAG;
5643
5644 /* 5706A0 may falsely detect SERR and PERR. */
5645 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5646 reg = REG_RD(bp, PCI_COMMAND);
5647 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5648 REG_WR(bp, PCI_COMMAND, reg);
5649 }
5650 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5651 !(bp->flags & PCIX_FLAG)) {
5652
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005653 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005654 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005655 goto err_out_unmap;
5656 }
5657
5658 bnx2_init_nvram(bp);
5659
Michael Chane3648b32005-11-04 08:51:21 -08005660 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5661
5662 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5663 BNX2_SHM_HDR_SIGNATURE_SIG)
5664 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5665 else
5666 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5667
Michael Chanb6016b72005-05-26 13:03:09 -07005668 /* Get the permanent MAC address. First we need to make sure the
5669 * firmware is actually running.
5670 */
Michael Chane3648b32005-11-04 08:51:21 -08005671 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005672
5673 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5674 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005675 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005676 rc = -ENODEV;
5677 goto err_out_unmap;
5678 }
5679
Michael Chane3648b32005-11-04 08:51:21 -08005680 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005681
Michael Chane3648b32005-11-04 08:51:21 -08005682 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005683 bp->mac_addr[0] = (u8) (reg >> 8);
5684 bp->mac_addr[1] = (u8) reg;
5685
Michael Chane3648b32005-11-04 08:51:21 -08005686 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005687 bp->mac_addr[2] = (u8) (reg >> 24);
5688 bp->mac_addr[3] = (u8) (reg >> 16);
5689 bp->mac_addr[4] = (u8) (reg >> 8);
5690 bp->mac_addr[5] = (u8) reg;
5691
5692 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005693 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005694
5695 bp->rx_csum = 1;
5696
5697 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5698
5699 bp->tx_quick_cons_trip_int = 20;
5700 bp->tx_quick_cons_trip = 20;
5701 bp->tx_ticks_int = 80;
5702 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005703
Michael Chanb6016b72005-05-26 13:03:09 -07005704 bp->rx_quick_cons_trip_int = 6;
5705 bp->rx_quick_cons_trip = 6;
5706 bp->rx_ticks_int = 18;
5707 bp->rx_ticks = 18;
5708
5709 bp->stats_ticks = 1000000 & 0xffff00;
5710
5711 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005712 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005713
Michael Chan5b0c76a2005-11-04 08:45:49 -08005714 bp->phy_addr = 1;
5715
Michael Chanb6016b72005-05-26 13:03:09 -07005716 /* Disable WOL support if we are running on a SERDES chip. */
5717 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5718 bp->phy_flags |= PHY_SERDES_FLAG;
5719 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005720 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5721 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005722 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005723 BNX2_SHARED_HW_CFG_CONFIG);
5724 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5725 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5726 }
Michael Chanb6016b72005-05-26 13:03:09 -07005727 }
5728
Michael Chan16088272006-06-12 22:16:43 -07005729 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5730 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5731 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005732 bp->flags |= NO_WOL_FLAG;
5733
Michael Chanb6016b72005-05-26 13:03:09 -07005734 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5735 bp->tx_quick_cons_trip_int =
5736 bp->tx_quick_cons_trip;
5737 bp->tx_ticks_int = bp->tx_ticks;
5738 bp->rx_quick_cons_trip_int =
5739 bp->rx_quick_cons_trip;
5740 bp->rx_ticks_int = bp->rx_ticks;
5741 bp->comp_prod_trip_int = bp->comp_prod_trip;
5742 bp->com_ticks_int = bp->com_ticks;
5743 bp->cmd_ticks_int = bp->cmd_ticks;
5744 }
5745
Michael Chanf9317a42006-09-29 17:06:23 -07005746 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5747 *
5748 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5749 * with byte enables disabled on the unused 32-bit word. This is legal
5750 * but causes problems on the AMD 8132 which will eventually stop
5751 * responding after a while.
5752 *
5753 * AMD believes this incompatibility is unique to the 5706, and
5754 * prefers to locally disable MSI rather than globally disabling it
5755 * using pci_msi_quirk.
5756 */
5757 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5758 struct pci_dev *amd_8132 = NULL;
5759
5760 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5761 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5762 amd_8132))) {
5763 u8 rev;
5764
5765 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5766 if (rev >= 0x10 && rev <= 0x13) {
5767 disable_msi = 1;
5768 pci_dev_put(amd_8132);
5769 break;
5770 }
5771 }
5772 }
5773
Michael Chanb6016b72005-05-26 13:03:09 -07005774 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5775 bp->req_line_speed = 0;
5776 if (bp->phy_flags & PHY_SERDES_FLAG) {
5777 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005778
Michael Chane3648b32005-11-04 08:51:21 -08005779 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005780 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5781 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5782 bp->autoneg = 0;
5783 bp->req_line_speed = bp->line_speed = SPEED_1000;
5784 bp->req_duplex = DUPLEX_FULL;
5785 }
Michael Chanb6016b72005-05-26 13:03:09 -07005786 }
5787 else {
5788 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5789 }
5790
5791 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5792
Michael Chancd339a02005-08-25 15:35:24 -07005793 init_timer(&bp->timer);
5794 bp->timer.expires = RUN_AT(bp->timer_interval);
5795 bp->timer.data = (unsigned long) bp;
5796 bp->timer.function = bnx2_timer;
5797
Michael Chanb6016b72005-05-26 13:03:09 -07005798 return 0;
5799
5800err_out_unmap:
5801 if (bp->regview) {
5802 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005803 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005804 }
5805
5806err_out_release:
5807 pci_release_regions(pdev);
5808
5809err_out_disable:
5810 pci_disable_device(pdev);
5811 pci_set_drvdata(pdev, NULL);
5812
5813err_out:
5814 return rc;
5815}
5816
5817static int __devinit
5818bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5819{
5820 static int version_printed = 0;
5821 struct net_device *dev = NULL;
5822 struct bnx2 *bp;
5823 int rc, i;
5824
5825 if (version_printed++ == 0)
5826 printk(KERN_INFO "%s", version);
5827
5828 /* dev zeroed in init_etherdev */
5829 dev = alloc_etherdev(sizeof(*bp));
5830
5831 if (!dev)
5832 return -ENOMEM;
5833
5834 rc = bnx2_init_board(pdev, dev);
5835 if (rc < 0) {
5836 free_netdev(dev);
5837 return rc;
5838 }
5839
5840 dev->open = bnx2_open;
5841 dev->hard_start_xmit = bnx2_start_xmit;
5842 dev->stop = bnx2_close;
5843 dev->get_stats = bnx2_get_stats;
5844 dev->set_multicast_list = bnx2_set_rx_mode;
5845 dev->do_ioctl = bnx2_ioctl;
5846 dev->set_mac_address = bnx2_change_mac_addr;
5847 dev->change_mtu = bnx2_change_mtu;
5848 dev->tx_timeout = bnx2_tx_timeout;
5849 dev->watchdog_timeo = TX_TIMEOUT;
5850#ifdef BCM_VLAN
5851 dev->vlan_rx_register = bnx2_vlan_rx_register;
5852 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5853#endif
5854 dev->poll = bnx2_poll;
5855 dev->ethtool_ops = &bnx2_ethtool_ops;
5856 dev->weight = 64;
5857
Michael Chan972ec0d2006-01-23 16:12:43 -08005858 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005859
5860#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5861 dev->poll_controller = poll_bnx2;
5862#endif
5863
5864 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005865 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005866 if (bp->regview)
5867 iounmap(bp->regview);
5868 pci_release_regions(pdev);
5869 pci_disable_device(pdev);
5870 pci_set_drvdata(pdev, NULL);
5871 free_netdev(dev);
5872 return rc;
5873 }
5874
5875 pci_set_drvdata(pdev, dev);
5876
5877 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005878 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005879 bp->name = board_info[ent->driver_data].name,
5880 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5881 "IRQ %d, ",
5882 dev->name,
5883 bp->name,
5884 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5885 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5886 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5887 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5888 bp->bus_speed_mhz,
5889 dev->base_addr,
5890 bp->pdev->irq);
5891
5892 printk("node addr ");
5893 for (i = 0; i < 6; i++)
5894 printk("%2.2x", dev->dev_addr[i]);
5895 printk("\n");
5896
5897 dev->features |= NETIF_F_SG;
5898 if (bp->flags & USING_DAC_FLAG)
5899 dev->features |= NETIF_F_HIGHDMA;
5900 dev->features |= NETIF_F_IP_CSUM;
5901#ifdef BCM_VLAN
5902 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5903#endif
5904#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07005905 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07005906#endif
5907
5908 netif_carrier_off(bp->dev);
5909
5910 return 0;
5911}
5912
/* PCI hot-unplug / driver-unload callback: tear down in roughly the
 * reverse order of bnx2_init_one().  unregister_netdev() runs before
 * the iounmap/free below so no new open()/xmit path can race with the
 * teardown.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any queued deferred work before the device goes away. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* regview can be NULL if probe failed partway; guard the iounmap. */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5931
5932static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07005933bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07005934{
5935 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005936 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005937 u32 reset_code;
5938
5939 if (!netif_running(dev))
5940 return 0;
5941
Michael Chan1d60290f2006-03-20 17:50:08 -08005942 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07005943 bnx2_netif_stop(bp);
5944 netif_device_detach(dev);
5945 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005946 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005947 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005948 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005949 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5950 else
5951 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5952 bnx2_reset_chip(bp, reset_code);
5953 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005954 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07005955 return 0;
5956}
5957
/* PM resume hook: mirror of bnx2_suspend().  Restores full power,
 * re-initializes the chip, and restarts traffic.  An interface that
 * was not up at suspend time needs no work.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: the chip was reset on suspend and lost its state. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
5973
/* Hook table registering this driver with the PCI core: device
 * probe/removal callbacks plus the power-management entry points.
 */
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
5982
/* Module entry point: register the PCI driver; per-device setup happens
 * later via the bnx2_init_one() probe callback.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
5987
/* Module exit point: unregister the PCI driver, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
5992
/* Declare the module's load/unload entry points to the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
5995
5996
5997