blob: 52fe620e1a4c11e8d528310ae72deefdd9955087 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
56
/* Driver identity strings and module parameters. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.45"
#define DRV_MODULE_RELDATE	"September 29, 2006"

/* Convert a relative delay in jiffies to an absolute deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; used as the driver_data index into board_info[]
 * via the PCI device table below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
89
/* Human-readable board names, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI device ID table.  HP OEM variants (matched by subsystem vendor/id)
 * must precede the generic PCI_ANY_ID entries for the same device ID so
 * they are matched first.  The last field is the board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* Table of supported NVRAM/flash parts.  Each entry carries the strap
 * value used to identify the part plus its programming parameters
 * (page bits/size, byte address mask, total size) and a display name.
 * NOTE(review): the magic hex values are hardware strap/config words
 * taken from the NetXtreme II NVRAM documentation — do not edit by hand.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Return the number of free TX descriptors.
 * The smp_mb() pairs with the producer/consumer index updates done on
 * other CPUs so we read consistent values of tx_prod and tx_cons.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	/* Adjust for the wrap-around case; the index space skips one
	 * slot per ring traversal, hence the extra -1.
	 */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
220
/* Indirect register read: select the target address through the PCICFG
 * register window, then read the windowed data register.
 * NOTE(review): callers appear to rely on external serialization of the
 * shared address/data window — confirm against the lock usage elsewhere.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write: select the target address through the PCICFG
 * register window, then write the value to the windowed data register.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
235static void
236bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237{
238 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800239 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
240 int i;
241
242 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
243 REG_WR(bp, BNX2_CTX_CTX_CTRL,
244 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
245 for (i = 0; i < 5; i++) {
246 u32 val;
247 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
248 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
249 break;
250 udelay(5);
251 }
252 } else {
253 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
254 REG_WR(bp, BNX2_CTX_DATA, val);
255 }
Michael Chanb6016b72005-05-26 13:03:09 -0700256}
257
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 *
 * If the PHY is in auto-polling interrupt mode, auto-poll is disabled
 * around the transaction and restored afterwards (with 40us settle
 * delays).  The transaction is started by writing the COMM register and
 * polled for completion up to 50 x 10us.
 *
 * Returns 0 on success; -EBUSY (with *val = 0) if the MDIO transaction
 * never completes.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* NOTE(review): read-back presumably flushes the posted
		 * write before the delay — confirm against HW docs. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read: PHY address, register number, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data bits of the result. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling mode. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
314
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the access
 * when enabled, the WRITE transaction is issued via the COMM register,
 * and completion is polled up to 50 x 10us.
 *
 * Returns 0 on success or -EBUSY on MDIO timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write: PHY address, register number, data, WRITE cmd. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling mode. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
363
/* Mask chip interrupts via the PCICFG interrupt-ack command register.
 * The trailing read flushes the posted write so the mask takes effect
 * before the caller proceeds.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
371
/* Unmask chip interrupts and ack up to the last seen status index.
 * Two writes are issued: first with MASK_INT still set, then without,
 * and finally COAL_NOW forces an immediate coalescing pass so any
 * pending events generate an interrupt right away.
 * NOTE(review): the two-step ack sequence looks like a HW requirement —
 * confirm against the NetXtreme II programming guide before changing.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
384
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable;
 * bnx2_netif_start() later decrements it.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
392
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the TX queue.  trans_start is refreshed so the
 * watchdog does not mistake the deliberate stop for a TX hang.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
403
/* Undo bnx2_netif_stop(): re-enable the TX queue, NAPI polling and chip
 * interrupts, but only when this call balances the last outstanding
 * bnx2_disable_int_sync() (intr_sem reaches zero).
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
415
/* Free all DMA-coherent rings and host-side shadow buffers allocated by
 * bnx2_alloc_mem().  Safe to call on a partially-allocated state (used
 * as the error path of bnx2_alloc_mem): every pointer is checked or
 * freed with a NULL-tolerant helper and reset to NULL afterwards.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context memory pages. */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status block; the statistics block shares this allocation. */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
454
/* Allocate all host memory the device needs: TX/RX descriptor rings and
 * their shadow buffer arrays, the combined status+statistics block, and
 * (on 5709) the context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Host-side TX shadow ring (zeroed). */
	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* RX shadow ring can be large (rx_max_ring pages), so vmalloc. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Stats block lives right after the cache-aligned status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context memory, in BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
525
/* Report the current link state (speed/duplex, autoneg status) to the
 * bootcode via the shared memory BNX2_LINK_STATUS word.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed + duplex. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice for current state. */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
581
582static void
Michael Chanb6016b72005-05-26 13:03:09 -0700583bnx2_report_link(struct bnx2 *bp)
584{
585 if (bp->link_up) {
586 netif_carrier_on(bp->dev);
587 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
588
589 printk("%d Mbps ", bp->line_speed);
590
591 if (bp->duplex == DUPLEX_FULL)
592 printk("full duplex");
593 else
594 printk("half duplex");
595
596 if (bp->flow_ctrl) {
597 if (bp->flow_ctrl & FLOW_CTRL_RX) {
598 printk(", receive ");
599 if (bp->flow_ctrl & FLOW_CTRL_TX)
600 printk("& transmit ");
601 }
602 else {
603 printk(", transmit ");
604 }
605 printk("flow control ON");
606 }
607 printk("\n");
608 }
609 else {
610 netif_carrier_off(bp->dev);
611 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
612 }
Michael Chane3648b32005-11-04 08:51:21 -0800613
614 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700615}
616
/* Resolve the effective flow-control setting (bp->flow_ctrl) after a
 * link change.
 *
 * If speed or flow control is forced (not fully autonegotiated), the
 * requested setting is used directly on full duplex.  On 5708 SerDes
 * the resolved result is read back from the hardware status register.
 * Otherwise the local/remote pause advertisements are combined per
 * IEEE 802.3ab-1999 Table 28B-3 (SerDes 1000X bits are first mapped to
 * the copper PAUSE_CAP/PAUSE_ASYM encoding).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Forced configuration: honor the request on full duplex. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes reports the resolved result directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000X pause bits to the common encoding. */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
692
/* Link-up handler for the 5708 SerDes PHY: decode negotiated speed and
 * duplex from the BCM5708S_1000X_STAT1 register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
721
722static int
723bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700724{
725 u32 bmcr, local_adv, remote_adv, common;
726
727 bp->link_up = 1;
728 bp->line_speed = SPEED_1000;
729
730 bnx2_read_phy(bp, MII_BMCR, &bmcr);
731 if (bmcr & BMCR_FULLDPLX) {
732 bp->duplex = DUPLEX_FULL;
733 }
734 else {
735 bp->duplex = DUPLEX_HALF;
736 }
737
738 if (!(bmcr & BMCR_ANENABLE)) {
739 return 0;
740 }
741
742 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
743 bnx2_read_phy(bp, MII_LPA, &remote_adv);
744
745 common = local_adv & remote_adv;
746 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
747
748 if (common & ADVERTISE_1000XFULL) {
749 bp->duplex = DUPLEX_FULL;
750 }
751 else {
752 bp->duplex = DUPLEX_HALF;
753 }
754 }
755
756 return 0;
757}
758
/* Link-up handler for copper PHYs: determine speed and duplex.
 *
 * With autoneg enabled, the highest common capability wins — 1000 is
 * checked first via CTRL1000/STAT1000 (note remote bits are shifted by
 * 2 to line up with the local advertisement), then 100/10 via
 * ADVERTISE/LPA.  With autoneg disabled, BMCR's forced speed/duplex
 * bits are decoded directly.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000BASE-T: master/slave regs, remote >> 2 aligns bits. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* Fall back to 100/10 advertisement. */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
824
/* Program the EMAC to match the resolved link parameters: port mode for
 * the current speed, duplex, and RX/TX pause enables.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff are TX_LENGTHS values; 0x26ff is
	 * applied only for 1000HD — presumably a slot-time/IPG tweak,
	 * confirm against the NetXtreme II docs. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
891
/* Re-evaluate link state after a link-change event and reprogram the
 * MAC accordingly.  Decodes BMSR (read twice — it is latched), invokes
 * the PHY-specific link-up handler, resolves flow control, logs any
 * transition and calls bnx2_set_mac_link().  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: trust the EMAC link status over BMSR. */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link lost: drop forced 2.5G and re-enable autoneg. */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
958
959static int
960bnx2_reset_phy(struct bnx2 *bp)
961{
962 int i;
963 u32 reg;
964
965 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
966
967#define PHY_RESET_MAX_WAIT 100
968 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
969 udelay(10);
970
971 bnx2_read_phy(bp, MII_BMCR, &reg);
972 if (!(reg & BMCR_RESET)) {
973 udelay(20);
974 break;
975 }
976 }
977 if (i == PHY_RESET_MAX_WAIT) {
978 return -EBUSY;
979 }
980 return 0;
981}
982
983static u32
984bnx2_phy_get_pause_adv(struct bnx2 *bp)
985{
986 u32 adv = 0;
987
988 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
989 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
990
991 if (bp->phy_flags & PHY_SERDES_FLAG) {
992 adv = ADVERTISE_1000XPAUSE;
993 }
994 else {
995 adv = ADVERTISE_PAUSE_CAP;
996 }
997 }
998 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
999 if (bp->phy_flags & PHY_SERDES_FLAG) {
1000 adv = ADVERTISE_1000XPSE_ASYM;
1001 }
1002 else {
1003 adv = ADVERTISE_PAUSE_ASYM;
1004 }
1005 }
1006 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1007 if (bp->phy_flags & PHY_SERDES_FLAG) {
1008 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1009 }
1010 else {
1011 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1012 }
1013 }
1014 return adv;
1015}
1016
/* Configure the SerDes PHY according to the autoneg / forced-speed
 * settings in *bp.  Called with bp->phy_lock held; the lock is dropped
 * briefly around the msleep() below, so state may change underneath.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				/* 2.5G capability was just enabled; bounce
				 * the link so it takes effect.
				 */
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				/* Dropping the 2.5G capability also needs
				 * a link bounce.
				 */
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path: advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only restart autoneg if the advertisement changed or autoneg
	 * is not currently enabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1120
1121#define ETHTOOL_ALL_FIBRE_SPEED \
1122 (ADVERTISED_1000baseT_Full)
1123
1124#define ETHTOOL_ALL_COPPER_SPEED \
1125 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1126 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1127 ADVERTISED_1000baseT_Full)
1128
1129#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1130 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001131
Michael Chanb6016b72005-05-26 13:03:09 -07001132#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1133
/* Configure the copper PHY from the settings in *bp: either program the
 * advertisement registers and (re)start autonegotiation, or force the
 * requested speed/duplex.  Called with bp->phy_lock held; the lock is
 * dropped around the msleep() used while forcing the link down.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement so it can be compared with the new value.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1227
1228static int
1229bnx2_setup_phy(struct bnx2 *bp)
1230{
1231 if (bp->loopback == MAC_LOOPBACK)
1232 return 0;
1233
1234 if (bp->phy_flags & PHY_SERDES_FLAG) {
1235 return (bnx2_setup_serdes_phy(bp));
1236 }
1237 else {
1238 return (bnx2_setup_copper_phy(bp));
1239 }
1240}
1241
1242static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001243bnx2_init_5708s_phy(struct bnx2 *bp)
1244{
1245 u32 val;
1246
1247 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1248 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1250
1251 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1252 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1253 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1254
1255 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1256 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1257 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1258
1259 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1260 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1261 val |= BCM5708S_UP1_2G5;
1262 bnx2_write_phy(bp, BCM5708S_UP1, val);
1263 }
1264
1265 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001266 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1267 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001268 /* increase tx signal amplitude */
1269 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1270 BCM5708S_BLK_ADDR_TX_MISC);
1271 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1272 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1273 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1274 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1275 }
1276
Michael Chane3648b32005-11-04 08:51:21 -08001277 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001278 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1279
1280 if (val) {
1281 u32 is_backplane;
1282
Michael Chane3648b32005-11-04 08:51:21 -08001283 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001284 BNX2_SHARED_HW_CFG_CONFIG);
1285 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1286 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1287 BCM5708S_BLK_ADDR_TX_MISC);
1288 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1289 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1290 BCM5708S_BLK_ADDR_DIG);
1291 }
1292 }
1293 return 0;
1294}
1295
/* One-time initialization of the 5706 SerDes PHY.  The 0x18/0x1c
 * accesses below are shadow/expansion registers; the exact bit
 * semantics are vendor-specific and undocumented here.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1330
/* One-time initialization of the copper PHY: apply the CRC fix
 * register sequence, set/clear the extended packet length bit per MTU,
 * and enable ethernet@wirespeed.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is unconditionally set just above, so
	 * this test is always true as written.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-specified shadow register sequence (CRC fix). */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1373
1374
/* Reset and initialize the PHY, read its ID, dispatch to the
 * chip-specific init routine, and program the current link settings.
 *
 * Returns the result of the chip-specific init (0 for an unhandled
 * SerDes chip).  NOTE(review): the return value of bnx2_reset_phy()
 * is ignored here.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID is split across the two PHYSID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1407
1408static int
1409bnx2_set_mac_loopback(struct bnx2 *bp)
1410{
1411 u32 mac_mode;
1412
1413 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1414 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1415 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1416 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1417 bp->link_up = 1;
1418 return 0;
1419}
1420
Michael Chanbc5a0692006-01-23 16:13:22 -08001421static int bnx2_test_link(struct bnx2 *);
1422
/* Put the PHY into loopback at 1000/full, wait for the link to come
 * up (up to ~1s), and configure the EMAC for GMII.  Acquires
 * bp->phy_lock only around the PHY write.
 *
 * Returns 0 on success or the error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the loopback link; give up silently after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear any loopback/duplex/forced-link bits, then select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1452
/* Post a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for the sequence-number acknowledgement.
 *
 * @msg_data: message code/data; the incremented sequence number is
 *            OR'ed in before posting.
 * @silent:   suppress the timeout error printk when non-zero.
 *
 * Returns 0 on success (or when the message requests no wait),
 * -EBUSY on ack timeout, -EIO when firmware reports a bad status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require completion checking. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1495
/* Program the 5709 context host page table: one 64-bit DMA address
 * per context page, each write polled for hardware completion.
 *
 * Returns 0 on success, -EBUSY if a page-table write does not
 * complete within the polling window.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2 - 8) into bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the page DMA address, then trigger
		 * the page-table write for entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for WRITE_REQ to self-clear (up to ~50us). */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1529
/* Zero out the on-chip context memory for all 96 connection IDs.
 * On 5706 A0 some CIDs map to different physical CIDs, hence the
 * remapping of vcid to new_vcid on that revision.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 silicon quirk: remap CIDs with bit 3 set. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1570
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the firmware pool, remember the good ones, and free only those
 * back — permanently quarantining the bad blocks (bit 9 set).
 *
 * Returns 0 on success, -ENOMEM if the temporary array allocation
 * fails.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1621
1622static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001623bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001624{
1625 u32 val;
1626 u8 *mac_addr = bp->dev->dev_addr;
1627
1628 val = (mac_addr[0] << 8) | mac_addr[1];
1629
1630 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1631
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001632 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001633 (mac_addr[4] << 8) | mac_addr[5];
1634
1635 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1636}
1637
/* Allocate and DMA-map a fresh receive skb for ring slot @index,
 * filling in both the software descriptor and the hardware rx_bd,
 * and advancing the producer byte-sequence counter.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Publish the 64-bit DMA address in the hardware descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1668
1669static void
1670bnx2_phy_int(struct bnx2 *bp)
1671{
1672 u32 new_link_state, old_link_state;
1673
1674 new_link_state = bp->status_blk->status_attn_bits &
1675 STATUS_ATTN_BITS_LINK_STATE;
1676 old_link_state = bp->status_blk->status_attn_bits_ack &
1677 STATUS_ATTN_BITS_LINK_STATE;
1678 if (new_link_state != old_link_state) {
1679 if (new_link_state) {
1680 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1681 STATUS_ATTN_BITS_LINK_STATE);
1682 }
1683 else {
1684 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1685 STATUS_ATTN_BITS_LINK_STATE);
1686 }
1687 bnx2_set_link(bp);
1688 }
1689}
1690
/* Reclaim completed TX descriptors: walk from the software consumer
 * index to the hardware consumer index in the status block, unmapping
 * and freeing each completed skb, then wake the TX queue if it was
 * stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last index of each ring page is not a normal BD
	 * (presumably the next-page pointer) and is skipped.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop when the final BD of this packet has not
			 * completed yet (signed comparison handles wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's page in ring order. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index: more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked under netif_tx_lock to avoid racing with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1778
/* Recycle an rx skb from consumer slot @cons back into producer slot
 * @prod without remapping: hand the DMA region back to the device,
 * move the skb pointer, and copy the DMA address into the producer's
 * software and hardware descriptors.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header region (synced for CPU by the rx path)
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing to copy. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1808
/* NAPI receive processing: consume up to @budget completed rx BDs,
 * validate the hardware l2_fhdr status, deliver good packets up the
 * stack (with VLAN/checksum offload results), recycle or replenish
 * ring buffers, and update the hardware producer mailbox.
 *
 * Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last index of each ring page is not a normal BD
	 * (presumably the next-page pointer) and is skipped.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header region for the CPU; enough to
		 * read the l2_fhdr and copy small packets.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Hardware length includes the 4-byte FCS; strip it. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: unmap and pass this
			 * buffer up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Allocation failed or bad packet: recycle the
			 * buffer and drop.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum result for TCP/UDP. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1958
1959/* MSI ISR - The only difference between this and the INTx ISR
1960 * is that the MSI interrupt is always serviced.
1961 */
1962static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01001963bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07001964{
1965 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08001966 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001967
Michael Chanc921e4c2005-09-08 13:15:32 -07001968 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07001969 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1970 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1971 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1972
1973 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07001974 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1975 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07001976
Michael Chan73eef4c2005-08-25 15:39:15 -07001977 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001978
Michael Chan73eef4c2005-08-25 15:39:15 -07001979 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07001980}
1981
1982static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01001983bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07001984{
1985 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08001986 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07001987
1988 /* When using INTx, it is possible for the interrupt to arrive
1989 * at the CPU before the status block posted prior to the
1990 * interrupt. Reading a register will flush the status block.
1991 * When using MSI, the MSI message will always complete after
1992 * the status block write.
1993 */
Michael Chanc921e4c2005-09-08 13:15:32 -07001994 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07001995 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1996 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07001997 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07001998
1999 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2000 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2001 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2002
2003 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002004 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2005 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002006
Michael Chan73eef4c2005-08-25 15:39:15 -07002007 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002008
Michael Chan73eef4c2005-08-25 15:39:15 -07002009 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002010}
2011
Michael Chanf4e418f2005-11-04 08:53:48 -08002012static inline int
2013bnx2_has_work(struct bnx2 *bp)
2014{
2015 struct status_block *sblk = bp->status_blk;
2016
2017 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2018 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2019 return 1;
2020
2021 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2022 bp->link_up)
2023 return 1;
2024
2025 return 0;
2026}
2027
/* NAPI poll handler: service link attention, TX and RX completions,
 * then re-enable interrupts once no work remains.  Returns 1 when more
 * work is pending (stay on the poll list), 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A link attention is pending when the attention bit differs
	 * from its acknowledgement bit in the status block.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more packets than the device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have consumed; rmb() orders this
	 * load before the re-check for new work below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* NOTE(review): for INTx the ack is written twice, first
		 * with MASK_INT still set and then with it cleared -
		 * presumably to avoid a race with a new interrupt while
		 * unmasking; confirm against the controller docs.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2089
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag stripping) and the
 * multicast hash filter to match dev->flags and dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; both are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep the VLAN tag in the frame unless VLAN acceleration is
	 * active or the ASF firmware needs to see tagged frames.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the MC hash. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one bit of the 256-bit
			 * multicast filter (8 registers x 32 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort mode in three steps: clear, set, enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2164
Michael Chanfba9fe92006-06-12 22:21:25 -07002165#define FW_BUF_SIZE 0x8000
2166
2167static int
2168bnx2_gunzip_init(struct bnx2 *bp)
2169{
2170 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2171 goto gunzip_nomem1;
2172
2173 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2174 goto gunzip_nomem2;
2175
2176 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2177 if (bp->strm->workspace == NULL)
2178 goto gunzip_nomem3;
2179
2180 return 0;
2181
2182gunzip_nomem3:
2183 kfree(bp->strm);
2184 bp->strm = NULL;
2185
2186gunzip_nomem2:
2187 vfree(bp->gunzip_buf);
2188 bp->gunzip_buf = NULL;
2189
2190gunzip_nomem1:
2191 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2192 "uncompression.\n", bp->dev->name);
2193 return -ENOMEM;
2194}
2195
2196static void
2197bnx2_gunzip_end(struct bnx2 *bp)
2198{
2199 kfree(bp->strm->workspace);
2200
2201 kfree(bp->strm);
2202 bp->strm = NULL;
2203
2204 if (bp->gunzip_buf) {
2205 vfree(bp->gunzip_buf);
2206 bp->gunzip_buf = NULL;
2207 }
2208}
2209
2210static int
2211bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2212{
2213 int n, rc;
2214
2215 /* check gzip header */
2216 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2217 return -EINVAL;
2218
2219 n = 10;
2220
2221#define FNAME 0x8
2222 if (zbuf[3] & FNAME)
2223 while ((zbuf[n++] != 0) && (n < len));
2224
2225 bp->strm->next_in = zbuf + n;
2226 bp->strm->avail_in = len - n;
2227 bp->strm->next_out = bp->gunzip_buf;
2228 bp->strm->avail_out = FW_BUF_SIZE;
2229
2230 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2231 if (rc != Z_OK)
2232 return rc;
2233
2234 rc = zlib_inflate(bp->strm, Z_FINISH);
2235
2236 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2237 *outbuf = bp->gunzip_buf;
2238
2239 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2240 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2241 bp->dev->name, bp->strm->msg);
2242
2243 zlib_inflateEnd(bp->strm);
2244
2245 if (rc == Z_STREAM_END)
2246 return 0;
2247
2248 return rc;
2249}
2250
Michael Chanb6016b72005-05-26 13:03:09 -07002251static void
2252load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2253 u32 rv2p_proc)
2254{
2255 int i;
2256 u32 val;
2257
2258
2259 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002260 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002261 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002262 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002263 rv2p_code++;
2264
2265 if (rv2p_proc == RV2P_PROC1) {
2266 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2267 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2268 }
2269 else {
2270 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2271 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2272 }
2273 }
2274
2275 /* Reset the processor, un-stall is done later. */
2276 if (rv2p_proc == RV2P_PROC1) {
2277 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2278 }
2279 else {
2280 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2281 }
2282}
2283
Michael Chanaf3ee512006-11-19 14:09:25 -08002284static int
Michael Chanb6016b72005-05-26 13:03:09 -07002285load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2286{
2287 u32 offset;
2288 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002289 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002290
2291 /* Halt the CPU. */
2292 val = REG_RD_IND(bp, cpu_reg->mode);
2293 val |= cpu_reg->mode_value_halt;
2294 REG_WR_IND(bp, cpu_reg->mode, val);
2295 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2296
2297 /* Load the Text area. */
2298 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002299 if (fw->gz_text) {
2300 u32 text_len;
2301 void *text;
2302
2303 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2304 &text_len);
2305 if (rc)
2306 return rc;
2307
2308 fw->text = text;
2309 }
2310 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002311 int j;
2312
2313 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002314 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002315 }
2316 }
2317
2318 /* Load the Data area. */
2319 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2320 if (fw->data) {
2321 int j;
2322
2323 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2324 REG_WR_IND(bp, offset, fw->data[j]);
2325 }
2326 }
2327
2328 /* Load the SBSS area. */
2329 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2330 if (fw->sbss) {
2331 int j;
2332
2333 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2334 REG_WR_IND(bp, offset, fw->sbss[j]);
2335 }
2336 }
2337
2338 /* Load the BSS area. */
2339 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2340 if (fw->bss) {
2341 int j;
2342
2343 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2344 REG_WR_IND(bp, offset, fw->bss[j]);
2345 }
2346 }
2347
2348 /* Load the Read-Only area. */
2349 offset = cpu_reg->spad_base +
2350 (fw->rodata_addr - cpu_reg->mips_view_base);
2351 if (fw->rodata) {
2352 int j;
2353
2354 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2355 REG_WR_IND(bp, offset, fw->rodata[j]);
2356 }
2357 }
2358
2359 /* Clear the pre-fetch instruction. */
2360 REG_WR_IND(bp, cpu_reg->inst, 0);
2361 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2362
2363 /* Start the CPU. */
2364 val = REG_RD_IND(bp, cpu_reg->mode);
2365 val &= ~cpu_reg->mode_value_halt;
2366 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2367 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002368
2369 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002370}
2371
/* Decompress and load firmware into the on-chip processors: both RV2P
 * engines, then the RX, TX, TX-patchup and Completion CPUs.  Returns 0
 * on success or a negative value from the gunzip/load helpers.  The
 * gunzip state is always torn down before returning.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2483
/* Transition the device between PCI power states.  Only PCI_D0
 * (resume) and PCI_D3hot (suspend, optionally with Wake-on-LAN armed)
 * are handled; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Leave the magic/ACPI-packet WoL receive mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WoL
			 * link, restoring user settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast: clear, set,
			 * then enable.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Notify the firmware that we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): early 5706 steppings only set the
			 * D3hot state bits when WoL is armed - presumably
			 * a chip erratum; confirm against errata docs.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2610
2611static int
2612bnx2_acquire_nvram_lock(struct bnx2 *bp)
2613{
2614 u32 val;
2615 int j;
2616
2617 /* Request access to the flash interface. */
2618 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2619 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2620 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2621 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2622 break;
2623
2624 udelay(5);
2625 }
2626
2627 if (j >= NVRAM_TIMEOUT_COUNT)
2628 return -EBUSY;
2629
2630 return 0;
2631}
2632
2633static int
2634bnx2_release_nvram_lock(struct bnx2 *bp)
2635{
2636 int j;
2637 u32 val;
2638
2639 /* Relinquish nvram interface. */
2640 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2641
2642 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2643 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2644 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2645 break;
2646
2647 udelay(5);
2648 }
2649
2650 if (j >= NVRAM_TIMEOUT_COUNT)
2651 return -EBUSY;
2652
2653 return 0;
2654}
2655
2656
2657static int
2658bnx2_enable_nvram_write(struct bnx2 *bp)
2659{
2660 u32 val;
2661
2662 val = REG_RD(bp, BNX2_MISC_CFG);
2663 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2664
2665 if (!bp->flash_info->buffered) {
2666 int j;
2667
2668 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669 REG_WR(bp, BNX2_NVM_COMMAND,
2670 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2671
2672 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2673 udelay(5);
2674
2675 val = REG_RD(bp, BNX2_NVM_COMMAND);
2676 if (val & BNX2_NVM_COMMAND_DONE)
2677 break;
2678 }
2679
2680 if (j >= NVRAM_TIMEOUT_COUNT)
2681 return -EBUSY;
2682 }
2683 return 0;
2684}
2685
2686static void
2687bnx2_disable_nvram_write(struct bnx2 *bp)
2688{
2689 u32 val;
2690
2691 val = REG_RD(bp, BNX2_MISC_CFG);
2692 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2693}
2694
2695
2696static void
2697bnx2_enable_nvram_access(struct bnx2 *bp)
2698{
2699 u32 val;
2700
2701 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2702 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002703 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002704 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2705}
2706
2707static void
2708bnx2_disable_nvram_access(struct bnx2 *bp)
2709{
2710 u32 val;
2711
2712 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2713 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002714 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002715 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2716 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2717}
2718
2719static int
2720bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2721{
2722 u32 cmd;
2723 int j;
2724
2725 if (bp->flash_info->buffered)
2726 /* Buffered flash, no erase needed */
2727 return 0;
2728
2729 /* Build an erase command */
2730 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2731 BNX2_NVM_COMMAND_DOIT;
2732
2733 /* Need to clear DONE bit separately. */
2734 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2735
2736 /* Address of the NVRAM to read from. */
2737 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2738
2739 /* Issue an erase command. */
2740 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2741
2742 /* Wait for completion. */
2743 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2744 u32 val;
2745
2746 udelay(5);
2747
2748 val = REG_RD(bp, BNX2_NVM_COMMAND);
2749 if (val & BNX2_NVM_COMMAND_DONE)
2750 break;
2751 }
2752
2753 if (j >= NVRAM_TIMEOUT_COUNT)
2754 return -EBUSY;
2755
2756 return 0;
2757}
2758
/* Read one 32-bit word from NVRAM.
 * @offset: byte offset in flash (translated below for buffered parts).
 * @ret_val: receives the 4 bytes.
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller.
 * Returns 0 on success, -EBUSY if the controller never signals DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash:
	 * (page number << page_bits) + offset within the page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Byte-swap from big-endian so that the memcpy
			 * leaves ret_val holding the flash bytes in
			 * storage order on any host.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2804
2805
2806static int
2807bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2808{
2809 u32 cmd, val32;
2810 int j;
2811
2812 /* Build the command word. */
2813 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2814
2815 /* Calculate an offset of a buffered flash. */
2816 if (bp->flash_info->buffered) {
2817 offset = ((offset / bp->flash_info->page_size) <<
2818 bp->flash_info->page_bits) +
2819 (offset % bp->flash_info->page_size);
2820 }
2821
2822 /* Need to clear DONE bit separately. */
2823 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2824
2825 memcpy(&val32, val, 4);
2826 val32 = cpu_to_be32(val32);
2827
2828 /* Write the data. */
2829 REG_WR(bp, BNX2_NVM_WRITE, val32);
2830
2831 /* Address of the NVRAM to write to. */
2832 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2833
2834 /* Issue the write command. */
2835 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2836
2837 /* Wait for completion. */
2838 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2839 udelay(5);
2840
2841 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2842 break;
2843 }
2844 if (j >= NVRAM_TIMEOUT_COUNT)
2845 return -EBUSY;
2846
2847 return 0;
2848}
2849
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVM interface when the strapped
 * configuration has not been applied yet.  Also determines the usable
 * flash size.  Returns 0, -ENODEV for an unknown part, or an error
 * from the NVRAM lock helpers.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap field of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size reported in the shared firmware config;
	 * fall back to the flash table's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2927
2928static int
2929bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2930 int buf_size)
2931{
2932 int rc = 0;
2933 u32 cmd_flags, offset32, len32, extra;
2934
2935 if (buf_size == 0)
2936 return 0;
2937
2938 /* Request access to the flash interface. */
2939 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2940 return rc;
2941
2942 /* Enable access to flash interface */
2943 bnx2_enable_nvram_access(bp);
2944
2945 len32 = buf_size;
2946 offset32 = offset;
2947 extra = 0;
2948
2949 cmd_flags = 0;
2950
2951 if (offset32 & 3) {
2952 u8 buf[4];
2953 u32 pre_len;
2954
2955 offset32 &= ~3;
2956 pre_len = 4 - (offset & 3);
2957
2958 if (pre_len >= len32) {
2959 pre_len = len32;
2960 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2961 BNX2_NVM_COMMAND_LAST;
2962 }
2963 else {
2964 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2965 }
2966
2967 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2968
2969 if (rc)
2970 return rc;
2971
2972 memcpy(ret_buf, buf + (offset & 3), pre_len);
2973
2974 offset32 += 4;
2975 ret_buf += pre_len;
2976 len32 -= pre_len;
2977 }
2978 if (len32 & 3) {
2979 extra = 4 - (len32 & 3);
2980 len32 = (len32 + 4) & ~3;
2981 }
2982
2983 if (len32 == 4) {
2984 u8 buf[4];
2985
2986 if (cmd_flags)
2987 cmd_flags = BNX2_NVM_COMMAND_LAST;
2988 else
2989 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2990 BNX2_NVM_COMMAND_LAST;
2991
2992 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2993
2994 memcpy(ret_buf, buf, 4 - extra);
2995 }
2996 else if (len32 > 0) {
2997 u8 buf[4];
2998
2999 /* Read the first word. */
3000 if (cmd_flags)
3001 cmd_flags = 0;
3002 else
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3004
3005 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3006
3007 /* Advance to the next dword. */
3008 offset32 += 4;
3009 ret_buf += 4;
3010 len32 -= 4;
3011
3012 while (len32 > 4 && rc == 0) {
3013 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3014
3015 /* Advance to the next dword. */
3016 offset32 += 4;
3017 ret_buf += 4;
3018 len32 -= 4;
3019 }
3020
3021 if (rc)
3022 return rc;
3023
3024 cmd_flags = BNX2_NVM_COMMAND_LAST;
3025 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3026
3027 memcpy(ret_buf, buf, 4 - extra);
3028 }
3029
3030 /* Disable access to flash interface */
3031 bnx2_disable_nvram_access(bp);
3032
3033 bnx2_release_nvram_lock(bp);
3034
3035 return rc;
3036}
3037
/* Write buf_size bytes from data_buf to NVRAM at byte offset.
 *
 * The flash is written a page at a time: unaligned head/tail bytes are
 * first read back so whole dwords can be rewritten, and for
 * non-buffered flash parts each affected page is read into a bounce
 * buffer, erased, and rewritten with the untouched portions restored.
 * The NVRAM lock is acquired/released around each page cycle.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: read the leading dword so the bytes before
	 * `offset` can be written back unchanged. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: likewise preserve the trailing dword (skipped
	 * when the whole request fits inside the dword already read). */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	/* Build a dword-aligned shadow of the caller's data with the
	 * preserved head/tail bytes merged in. */
	if (align_start || align_end) {
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == 0)
			return -ENOMEM;
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle.  264 bytes: assumed >= page_size
	 * for all non-buffered parts in flash_table — TODO confirm. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		/* NOTE(review): error paths below jump to nvram_write_end
		 * without releasing the NVRAM lock acquired above —
		 * confirm whether a release is needed on failure. */

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so the unbuffered-only guard is just
	 * documentation: flash_buffer is allocated only in that case. */
	if (bp->flash_info->buffered == 0)
		kfree(flash_buffer);

	if (align_start || align_end)
		kfree(buf);
	return rc;
}
3221
3222static int
3223bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3224{
3225 u32 val;
3226 int i, rc = 0;
3227
3228 /* Wait for the current PCI transaction to complete before
3229 * issuing a reset. */
3230 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3231 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3232 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3233 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3234 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3235 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3236 udelay(5);
3237
Michael Chanb090ae22006-01-23 16:07:10 -08003238 /* Wait for the firmware to tell us it is ok to issue a reset. */
3239 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3240
Michael Chanb6016b72005-05-26 13:03:09 -07003241 /* Deposit a driver reset signature so the firmware knows that
3242 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003243 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003244 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3245
Michael Chanb6016b72005-05-26 13:03:09 -07003246 /* Do a dummy read to force the chip to complete all current transaction
3247 * before we issue a reset. */
3248 val = REG_RD(bp, BNX2_MISC_ID);
3249
Michael Chan234754d2006-11-19 14:11:41 -08003250 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3251 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3252 REG_RD(bp, BNX2_MISC_COMMAND);
3253 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003254
Michael Chan234754d2006-11-19 14:11:41 -08003255 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3256 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003257
Michael Chan234754d2006-11-19 14:11:41 -08003258 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003259
Michael Chan234754d2006-11-19 14:11:41 -08003260 } else {
3261 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3262 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3263 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3264
3265 /* Chip reset. */
3266 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3267
3268 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3269 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3270 current->state = TASK_UNINTERRUPTIBLE;
3271 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003272 }
Michael Chanb6016b72005-05-26 13:03:09 -07003273
Michael Chan234754d2006-11-19 14:11:41 -08003274 /* Reset takes approximate 30 usec */
3275 for (i = 0; i < 10; i++) {
3276 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3277 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3278 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3279 break;
3280 udelay(10);
3281 }
3282
3283 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3284 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3285 printk(KERN_ERR PFX "Chip reset did not complete\n");
3286 return -EBUSY;
3287 }
Michael Chanb6016b72005-05-26 13:03:09 -07003288 }
3289
3290 /* Make sure byte swapping is properly configured. */
3291 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3292 if (val != 0x01020304) {
3293 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3294 return -ENODEV;
3295 }
3296
Michael Chanb6016b72005-05-26 13:03:09 -07003297 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003298 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3299 if (rc)
3300 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003301
3302 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3303 /* Adjust the voltage regular to two steps lower. The default
3304 * of this register is 0x0000000e. */
3305 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3306
3307 /* Remove bad rbuf memory from the free pool. */
3308 rc = bnx2_alloc_bad_rbuf(bp);
3309 }
3310
3311 return rc;
3312}
3313
/* Bring the chip from post-reset state to operational: program the DMA
 * and byte-swap configuration, initialize the context memory, load the
 * on-chip CPU firmware, program MAC address / MTU / host-coalescing
 * parameters, set the RX filter and finally hand off to the bootcode
 * via fw_sync (WAIT2).  Returns 0 or a negative errno from CPU init or
 * fw_sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X — meaning of bit 23
	 * not documented here; TODO confirm against chip manual. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* Clear the PCI-X relaxed-ordering enable bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size in the MQ block. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds: interrupt-mode value in the high
	 * halfword, poll-mode value in the low halfword. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware reports ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3478
Michael Chan59b47d82006-11-19 14:10:45 -08003479static void
3480bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3481{
3482 u32 val, offset0, offset1, offset2, offset3;
3483
3484 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3485 offset0 = BNX2_L2CTX_TYPE_XI;
3486 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3487 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3488 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3489 } else {
3490 offset0 = BNX2_L2CTX_TYPE;
3491 offset1 = BNX2_L2CTX_CMD_TYPE;
3492 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3493 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3494 }
3495 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3496 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3497
3498 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3499 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3500
3501 val = (u64) bp->tx_desc_mapping >> 32;
3502 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3503
3504 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3505 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3506}
Michael Chanb6016b72005-05-26 13:03:09 -07003507
3508static void
3509bnx2_init_tx_ring(struct bnx2 *bp)
3510{
3511 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08003512 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07003513
Michael Chan2f8af122006-08-15 01:39:10 -07003514 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3515
Michael Chanb6016b72005-05-26 13:03:09 -07003516 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003517
Michael Chanb6016b72005-05-26 13:03:09 -07003518 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3519 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3520
3521 bp->tx_prod = 0;
3522 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08003523 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07003524 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003525
Michael Chan59b47d82006-11-19 14:10:45 -08003526 cid = TX_CID;
3527 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3528 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07003529
Michael Chan59b47d82006-11-19 14:10:45 -08003530 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07003531}
3532
/* Reset the RX ring state, chain the RX descriptor pages into a ring,
 * program the RX L2 context, and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Initialize every BD in every ring page; the last BD of each
	 * page is left as a chain pointer to the next page (the final
	 * page links back to page 0, closing the ring). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the first ring page address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive skbs; stop early if allocation fails
	 * (the ring then simply starts with fewer buffers). */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the hardware how many buffers are available. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3592
3593static void
Michael Chan13daffa2006-03-20 17:49:20 -08003594bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3595{
3596 u32 num_rings, max;
3597
3598 bp->rx_ring_size = size;
3599 num_rings = 1;
3600 while (size > MAX_RX_DESC_CNT) {
3601 size -= MAX_RX_DESC_CNT;
3602 num_rings++;
3603 }
3604 /* round to next power of 2 */
3605 max = MAX_RX_RINGS;
3606 while ((max & num_rings) == 0)
3607 max >>= 1;
3608
3609 if (num_rings != max)
3610 max <<= 1;
3611
3612 bp->rx_max_ring = max;
3613 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3614}
3615
/* Release every pending TX skb: DMA-unmap the linear head and each
 * fragment page, then free the skb.  A multi-fragment packet occupies
 * consecutive tx_buf_ring slots (head first, then one per fragment),
 * which is why the index advances by j + 1 after each packet.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot — move to the next one. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each paged fragment from the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and all fragment slots. */
		i += j + 1;
	}

}
3652
3653static void
3654bnx2_free_rx_skbs(struct bnx2 *bp)
3655{
3656 int i;
3657
3658 if (bp->rx_buf_ring == NULL)
3659 return;
3660
Michael Chan13daffa2006-03-20 17:49:20 -08003661 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003662 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3663 struct sk_buff *skb = rx_buf->skb;
3664
Michael Chan05d0f1c2005-11-04 08:53:48 -08003665 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003666 continue;
3667
3668 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3669 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3670
3671 rx_buf->skb = NULL;
3672
Michael Chan745720e2006-06-29 12:37:41 -07003673 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003674 }
3675}
3676
/* Free all TX and RX ring buffers; used when resetting the NIC. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3683
3684static int
3685bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3686{
3687 int rc;
3688
3689 rc = bnx2_reset_chip(bp, reset_code);
3690 bnx2_free_skbs(bp);
3691 if (rc)
3692 return rc;
3693
Michael Chanfba9fe92006-06-12 22:21:25 -07003694 if ((rc = bnx2_init_chip(bp)) != 0)
3695 return rc;
3696
Michael Chanb6016b72005-05-26 13:03:09 -07003697 bnx2_init_tx_ring(bp);
3698 bnx2_init_rx_ring(bp);
3699 return 0;
3700}
3701
3702static int
3703bnx2_init_nic(struct bnx2 *bp)
3704{
3705 int rc;
3706
3707 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3708 return rc;
3709
Michael Chan80be4432006-11-19 14:07:28 -08003710 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003711 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003712 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003713 bnx2_set_link(bp);
3714 return 0;
3715}
3716
/* Ethtool self-test: walk a table of register offsets, verifying for
 * each that read/write bits (rw_mask) can be cleared and set, and that
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;	/* register offset in BAR0 */
		u16 flags;	/* unused (always 0 in this table) */
		u32 rw_mask;	/* bits expected to be writable */
		u32 ro_mask;	/* bits expected to be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* table sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must clear, ro bits must not
		 * change. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must set, ro bits must not
		 * change. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3879
3880static int
3881bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3882{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003883 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003884 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3885 int i;
3886
3887 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3888 u32 offset;
3889
3890 for (offset = 0; offset < size; offset += 4) {
3891
3892 REG_WR_IND(bp, start + offset, test_pattern[i]);
3893
3894 if (REG_RD_IND(bp, start + offset) !=
3895 test_pattern[i]) {
3896 return -ENODEV;
3897 }
3898 }
3899 }
3900 return 0;
3901}
3902
3903static int
3904bnx2_test_memory(struct bnx2 *bp)
3905{
3906 int ret = 0;
3907 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003908 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003909 u32 offset;
3910 u32 len;
3911 } mem_tbl[] = {
3912 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003913 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003914 { 0xe0000, 0x4000 },
3915 { 0x120000, 0x4000 },
3916 { 0x1a0000, 0x4000 },
3917 { 0x160000, 0x4000 },
3918 { 0xffffffff, 0 },
3919 };
3920
3921 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3922 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3923 mem_tbl[i].len)) != 0) {
3924 return ret;
3925 }
3926 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003927
Michael Chanb6016b72005-05-26 13:03:09 -07003928 return ret;
3929}
3930
Michael Chanbc5a0692006-01-23 16:13:22 -08003931#define BNX2_MAC_LOOPBACK 0
3932#define BNX2_PHY_LOOPBACK 1
3933
Michael Chanb6016b72005-05-26 13:03:09 -07003934static int
Michael Chanbc5a0692006-01-23 16:13:22 -08003935bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07003936{
3937 unsigned int pkt_size, num_pkts, i;
3938 struct sk_buff *skb, *rx_skb;
3939 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08003940 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07003941 dma_addr_t map;
3942 struct tx_bd *txbd;
3943 struct sw_bd *rx_buf;
3944 struct l2_fhdr *rx_hdr;
3945 int ret = -ENODEV;
3946
Michael Chanbc5a0692006-01-23 16:13:22 -08003947 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3948 bp->loopback = MAC_LOOPBACK;
3949 bnx2_set_mac_loopback(bp);
3950 }
3951 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08003952 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08003953 bnx2_set_phy_loopback(bp);
3954 }
3955 else
3956 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07003957
3958 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07003959 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08003960 if (!skb)
3961 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07003962 packet = skb_put(skb, pkt_size);
3963 memcpy(packet, bp->mac_addr, 6);
3964 memset(packet + 6, 0x0, 8);
3965 for (i = 14; i < pkt_size; i++)
3966 packet[i] = (unsigned char) (i & 0xff);
3967
3968 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3969 PCI_DMA_TODEVICE);
3970
Michael Chanbf5295b2006-03-23 01:11:56 -08003971 REG_WR(bp, BNX2_HC_COMMAND,
3972 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3973
Michael Chanb6016b72005-05-26 13:03:09 -07003974 REG_RD(bp, BNX2_HC_COMMAND);
3975
3976 udelay(5);
3977 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3978
Michael Chanb6016b72005-05-26 13:03:09 -07003979 num_pkts = 0;
3980
Michael Chanbc5a0692006-01-23 16:13:22 -08003981 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07003982
3983 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3984 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3985 txbd->tx_bd_mss_nbytes = pkt_size;
3986 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3987
3988 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08003989 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3990 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07003991
Michael Chan234754d2006-11-19 14:11:41 -08003992 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
3993 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07003994
3995 udelay(100);
3996
Michael Chanbf5295b2006-03-23 01:11:56 -08003997 REG_WR(bp, BNX2_HC_COMMAND,
3998 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3999
Michael Chanb6016b72005-05-26 13:03:09 -07004000 REG_RD(bp, BNX2_HC_COMMAND);
4001
4002 udelay(5);
4003
4004 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004005 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004006
Michael Chanbc5a0692006-01-23 16:13:22 -08004007 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004008 goto loopback_test_done;
4009 }
4010
4011 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4012 if (rx_idx != rx_start_idx + num_pkts) {
4013 goto loopback_test_done;
4014 }
4015
4016 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4017 rx_skb = rx_buf->skb;
4018
4019 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4020 skb_reserve(rx_skb, bp->rx_offset);
4021
4022 pci_dma_sync_single_for_cpu(bp->pdev,
4023 pci_unmap_addr(rx_buf, mapping),
4024 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4025
Michael Chanade2bfe2006-01-23 16:09:51 -08004026 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004027 (L2_FHDR_ERRORS_BAD_CRC |
4028 L2_FHDR_ERRORS_PHY_DECODE |
4029 L2_FHDR_ERRORS_ALIGNMENT |
4030 L2_FHDR_ERRORS_TOO_SHORT |
4031 L2_FHDR_ERRORS_GIANT_FRAME)) {
4032
4033 goto loopback_test_done;
4034 }
4035
4036 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4037 goto loopback_test_done;
4038 }
4039
4040 for (i = 14; i < pkt_size; i++) {
4041 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4042 goto loopback_test_done;
4043 }
4044 }
4045
4046 ret = 0;
4047
4048loopback_test_done:
4049 bp->loopback = 0;
4050 return ret;
4051}
4052
Michael Chanbc5a0692006-01-23 16:13:22 -08004053#define BNX2_MAC_LOOPBACK_FAILED 1
4054#define BNX2_PHY_LOOPBACK_FAILED 2
4055#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4056 BNX2_PHY_LOOPBACK_FAILED)
4057
4058static int
4059bnx2_test_loopback(struct bnx2 *bp)
4060{
4061 int rc = 0;
4062
4063 if (!netif_running(bp->dev))
4064 return BNX2_LOOPBACK_FAILED;
4065
4066 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4067 spin_lock_bh(&bp->phy_lock);
4068 bnx2_init_phy(bp);
4069 spin_unlock_bh(&bp->phy_lock);
4070 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4071 rc |= BNX2_MAC_LOOPBACK_FAILED;
4072 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4073 rc |= BNX2_PHY_LOOPBACK_FAILED;
4074 return rc;
4075}
4076
Michael Chanb6016b72005-05-26 13:03:09 -07004077#define NVRAM_SIZE 0x200
4078#define CRC32_RESIDUAL 0xdebb20e3
4079
4080static int
4081bnx2_test_nvram(struct bnx2 *bp)
4082{
4083 u32 buf[NVRAM_SIZE / 4];
4084 u8 *data = (u8 *) buf;
4085 int rc = 0;
4086 u32 magic, csum;
4087
4088 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4089 goto test_nvram_done;
4090
4091 magic = be32_to_cpu(buf[0]);
4092 if (magic != 0x669955aa) {
4093 rc = -ENODEV;
4094 goto test_nvram_done;
4095 }
4096
4097 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4098 goto test_nvram_done;
4099
4100 csum = ether_crc_le(0x100, data);
4101 if (csum != CRC32_RESIDUAL) {
4102 rc = -ENODEV;
4103 goto test_nvram_done;
4104 }
4105
4106 csum = ether_crc_le(0x100, data + 0x100);
4107 if (csum != CRC32_RESIDUAL) {
4108 rc = -ENODEV;
4109 }
4110
4111test_nvram_done:
4112 return rc;
4113}
4114
4115static int
4116bnx2_test_link(struct bnx2 *bp)
4117{
4118 u32 bmsr;
4119
Michael Chanc770a652005-08-25 15:38:39 -07004120 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004121 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4122 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004123 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004124
Michael Chanb6016b72005-05-26 13:03:09 -07004125 if (bmsr & BMSR_LSTATUS) {
4126 return 0;
4127 }
4128 return -ENODEV;
4129}
4130
4131static int
4132bnx2_test_intr(struct bnx2 *bp)
4133{
4134 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004135 u16 status_idx;
4136
4137 if (!netif_running(bp->dev))
4138 return -ENODEV;
4139
4140 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4141
4142 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004143 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004144 REG_RD(bp, BNX2_HC_COMMAND);
4145
4146 for (i = 0; i < 10; i++) {
4147 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4148 status_idx) {
4149
4150 break;
4151 }
4152
4153 msleep_interruptible(10);
4154 }
4155 if (i < 10)
4156 return 0;
4157
4158 return -ENODEV;
4159}
4160
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Implements parallel-detect: if autoneg is enabled
 * but link never comes up while the remote partner is not sending
 * config (i.e. a forced-speed partner), drop to forced 1000/full;
 * once a parallel-detected link partner starts autoneg config again,
 * re-enable autoneg.
 *
 * Registers 0x1c/0x17/0x15 are Broadcom vendor-specific shadow /
 * expansion registers -- exact bit semantics per the PHY datasheet.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	/* A pending countdown delays any action for a few timer ticks
	 * (e.g. right after an autoneg restart).
	 */
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Read signal-detect and link-partner config
			 * status from the vendor shadow registers.
			 * 0x15 is read twice -- presumably to clear a
			 * latched value; confirm against PHY docs.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is forced: disable autoneg
				 * and force 1000/full ourselves.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends CONFIG, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4215
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  Only active when the PHY is 2.5G-capable: while the
 * link is down with autoneg requested, alternate between autoneg and
 * forced 2.5G/full so we can link with partners in either mode.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	/* Countdown skips a few ticks after restarting autoneg below. */
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not link; try forced 2.5G. */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not link; go back to autoneg
			 * and give it two timer ticks before retrying.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4250
/* Driver heartbeat timer (softirq context).  Sends the driver-alive
 * pulse to the bootcode, samples the firmware RX-drop counter, and
 * runs the per-chip SerDes link state machines.  Reschedules itself
 * at bp->current_interval, which the serdes handlers may shorten.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to firmware: monotonically increasing sequence. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4278
/* Called with rtnl_lock */
/* net_device open handler: power up the chip, allocate rings, attach
 * the interrupt (preferring MSI where supported), initialize the NIC,
 * and start the heartbeat timer and TX queue.
 *
 * MSI is avoided on 5706 A0/A1 and when the disable_msi module
 * parameter is set.  Because some chipsets silently fail to deliver
 * MSI, a live interrupt test is run after init; on failure the driver
 * falls back to INTx, which requires a full re-init of the NIC.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			/* MSI could not be enabled; use shared INTx. */
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI and re-init the NIC on INTx. */
			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4374
/* Workqueue handler performing a full NIC re-initialization after a
 * TX timeout (scheduled from bnx2_tx_timeout()).
 *
 * in_reset_task is polled by bnx2_close() so teardown waits for this
 * to finish; it is a plain int with no locking -- NOTE(review):
 * relies on scheduling ordering rather than a memory barrier.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold off interrupt handling until netif_start re-enables. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4392
4393static void
4394bnx2_tx_timeout(struct net_device *dev)
4395{
Michael Chan972ec0d2006-01-23 16:12:43 -08004396 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004397
4398 /* This allows the netif to be shutdown gracefully before resetting */
4399 schedule_work(&bp->reset_task);
4400}
4401
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration: record the new vlan_group and reprogram the RX
 * mode with traffic quiesced, so the chip's VLAN handling matches.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4416
/* Called with rtnl_lock */
/* VLAN acceleration: drop a VLAN id from the group table and refresh
 * the RX mode, with traffic quiesced around the update.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
4432
Herbert Xu932ff272006-06-09 12:20:56 -07004433/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004434 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4435 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004436 */
4437static int
4438bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4439{
Michael Chan972ec0d2006-01-23 16:12:43 -08004440 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004441 dma_addr_t mapping;
4442 struct tx_bd *txbd;
4443 struct sw_bd *tx_buf;
4444 u32 len, vlan_tag_flags, last_frag, mss;
4445 u16 prod, ring_prod;
4446 int i;
4447
Michael Chane89bbf12005-08-25 15:36:58 -07004448 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004449 netif_stop_queue(dev);
4450 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4451 dev->name);
4452
4453 return NETDEV_TX_BUSY;
4454 }
4455 len = skb_headlen(skb);
4456 prod = bp->tx_prod;
4457 ring_prod = TX_RING_IDX(prod);
4458
4459 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004460 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004461 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4462 }
4463
4464 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4465 vlan_tag_flags |=
4466 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4467 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004468#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004469 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004470 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4471 u32 tcp_opt_len, ip_tcp_len;
4472
4473 if (skb_header_cloned(skb) &&
4474 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4475 dev_kfree_skb(skb);
4476 return NETDEV_TX_OK;
4477 }
4478
4479 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4480 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4481
4482 tcp_opt_len = 0;
4483 if (skb->h.th->doff > 5) {
4484 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4485 }
4486 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4487
4488 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004489 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004490 skb->h.th->check =
4491 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4492 skb->nh.iph->daddr,
4493 0, IPPROTO_TCP, 0);
4494
4495 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4496 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4497 (tcp_opt_len >> 2)) << 8;
4498 }
4499 }
4500 else
4501#endif
4502 {
4503 mss = 0;
4504 }
4505
4506 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004507
Michael Chanb6016b72005-05-26 13:03:09 -07004508 tx_buf = &bp->tx_buf_ring[ring_prod];
4509 tx_buf->skb = skb;
4510 pci_unmap_addr_set(tx_buf, mapping, mapping);
4511
4512 txbd = &bp->tx_desc_ring[ring_prod];
4513
4514 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4515 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4516 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4517 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4518
4519 last_frag = skb_shinfo(skb)->nr_frags;
4520
4521 for (i = 0; i < last_frag; i++) {
4522 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4523
4524 prod = NEXT_TX_BD(prod);
4525 ring_prod = TX_RING_IDX(prod);
4526 txbd = &bp->tx_desc_ring[ring_prod];
4527
4528 len = frag->size;
4529 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4530 len, PCI_DMA_TODEVICE);
4531 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4532 mapping, mapping);
4533
4534 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4535 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4536 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4537 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4538
4539 }
4540 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4541
4542 prod = NEXT_TX_BD(prod);
4543 bp->tx_prod_bseq += skb->len;
4544
Michael Chan234754d2006-11-19 14:11:41 -08004545 REG_WR16(bp, bp->tx_bidx_addr, prod);
4546 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004547
4548 mmiowb();
4549
4550 bp->tx_prod = prod;
4551 dev->trans_start = jiffies;
4552
Michael Chane89bbf12005-08-25 15:36:58 -07004553 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004554 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004555 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004556 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004557 }
4558
4559 return NETDEV_TX_OK;
4560}
4561
/* Called with rtnl_lock */
/* net_device stop handler: wait out any in-flight reset work, quiesce
 * traffic, tell the bootcode why we are unloading (WOL / no-WOL /
 * link-down), reset the chip, release IRQ and memory, and drop the
 * device into D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload reason based on WOL capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4597
/* Fold a hardware counter split into _hi/_lo 32-bit words into an
 * unsigned long.  The whole expansion is now parenthesized so the
 * macros are safe inside larger expressions (the original `A + B`
 * form would bind wrongly under, e.g., multiplication).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low word fits in unsigned long. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4610
/* net_device get_stats handler: translate the chip's statistics block
 * (DMA'd by the hardware into bp->stats_blk) into the generic
 * net_device_stats counters.  Returns the (possibly stale, zeroed)
 * net_stats if the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* Packet/byte totals come from 64-bit hi/lo counter pairs. */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	/* NOTE(review): multicast is filled from the *Out* multicast
	 * counter here -- looks like it should be the In counter;
	 * confirm against chip documentation before changing.
	 */
	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is refreshed periodically by bnx2_timer(). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4686
4687/* All ethtool functions called with rtnl_lock */
4688
4689static int
4690bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4691{
Michael Chan972ec0d2006-01-23 16:12:43 -08004692 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004693
4694 cmd->supported = SUPPORTED_Autoneg;
4695 if (bp->phy_flags & PHY_SERDES_FLAG) {
4696 cmd->supported |= SUPPORTED_1000baseT_Full |
4697 SUPPORTED_FIBRE;
4698
4699 cmd->port = PORT_FIBRE;
4700 }
4701 else {
4702 cmd->supported |= SUPPORTED_10baseT_Half |
4703 SUPPORTED_10baseT_Full |
4704 SUPPORTED_100baseT_Half |
4705 SUPPORTED_100baseT_Full |
4706 SUPPORTED_1000baseT_Full |
4707 SUPPORTED_TP;
4708
4709 cmd->port = PORT_TP;
4710 }
4711
4712 cmd->advertising = bp->advertising;
4713
4714 if (bp->autoneg & AUTONEG_SPEED) {
4715 cmd->autoneg = AUTONEG_ENABLE;
4716 }
4717 else {
4718 cmd->autoneg = AUTONEG_DISABLE;
4719 }
4720
4721 if (netif_carrier_ok(dev)) {
4722 cmd->speed = bp->line_speed;
4723 cmd->duplex = bp->duplex;
4724 }
4725 else {
4726 cmd->speed = -1;
4727 cmd->duplex = -1;
4728 }
4729
4730 cmd->transceiver = XCVR_INTERNAL;
4731 cmd->phy_address = bp->phy_addr;
4732
4733 return 0;
4734}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004735
Michael Chanb6016b72005-05-26 13:03:09 -07004736static int
4737bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4738{
Michael Chan972ec0d2006-01-23 16:12:43 -08004739 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004740 u8 autoneg = bp->autoneg;
4741 u8 req_duplex = bp->req_duplex;
4742 u16 req_line_speed = bp->req_line_speed;
4743 u32 advertising = bp->advertising;
4744
4745 if (cmd->autoneg == AUTONEG_ENABLE) {
4746 autoneg |= AUTONEG_SPEED;
4747
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004748 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07004749
4750 /* allow advertising 1 speed */
4751 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4752 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4753 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4754 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4755
4756 if (bp->phy_flags & PHY_SERDES_FLAG)
4757 return -EINVAL;
4758
4759 advertising = cmd->advertising;
4760
4761 }
4762 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4763 advertising = cmd->advertising;
4764 }
4765 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4766 return -EINVAL;
4767 }
4768 else {
4769 if (bp->phy_flags & PHY_SERDES_FLAG) {
4770 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4771 }
4772 else {
4773 advertising = ETHTOOL_ALL_COPPER_SPEED;
4774 }
4775 }
4776 advertising |= ADVERTISED_Autoneg;
4777 }
4778 else {
4779 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08004780 if ((cmd->speed != SPEED_1000 &&
4781 cmd->speed != SPEED_2500) ||
4782 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07004783 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08004784
4785 if (cmd->speed == SPEED_2500 &&
4786 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4787 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004788 }
4789 else if (cmd->speed == SPEED_1000) {
4790 return -EINVAL;
4791 }
4792 autoneg &= ~AUTONEG_SPEED;
4793 req_line_speed = cmd->speed;
4794 req_duplex = cmd->duplex;
4795 advertising = 0;
4796 }
4797
4798 bp->autoneg = autoneg;
4799 bp->advertising = advertising;
4800 bp->req_line_speed = req_line_speed;
4801 bp->req_duplex = req_duplex;
4802
Michael Chanc770a652005-08-25 15:38:39 -07004803 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004804
4805 bnx2_setup_phy(bp);
4806
Michael Chanc770a652005-08-25 15:38:39 -07004807 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004808
4809 return 0;
4810}
4811
4812static void
4813bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4814{
Michael Chan972ec0d2006-01-23 16:12:43 -08004815 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004816
4817 strcpy(info->driver, DRV_MODULE_NAME);
4818 strcpy(info->version, DRV_MODULE_VERSION);
4819 strcpy(info->bus_info, pci_name(bp->pdev));
4820 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4821 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4822 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004823 info->fw_version[1] = info->fw_version[3] = '.';
4824 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004825}
4826
Michael Chan244ac4f2006-03-20 17:48:46 -08004827#define BNX2_REGDUMP_LEN (32 * 1024)
4828
4829static int
4830bnx2_get_regs_len(struct net_device *dev)
4831{
4832 return BNX2_REGDUMP_LEN;
4833}
4834
/* ethtool -d: dump the first 32K of device register space into *_p.
 *
 * reg_boundaries[] holds alternating [start, end) offsets of readable
 * register ranges; the gaps between ranges are left zero-filled,
 * presumably because reading them is unsafe on this chip -- TODO
 * confirm against the hardware manual.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	/* Zero the whole buffer first so the skipped gaps read as 0. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of this readable range: jump to the next. */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
4884
Michael Chanb6016b72005-05-26 13:03:09 -07004885static void
4886bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4887{
Michael Chan972ec0d2006-01-23 16:12:43 -08004888 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004889
4890 if (bp->flags & NO_WOL_FLAG) {
4891 wol->supported = 0;
4892 wol->wolopts = 0;
4893 }
4894 else {
4895 wol->supported = WAKE_MAGIC;
4896 if (bp->wol)
4897 wol->wolopts = WAKE_MAGIC;
4898 else
4899 wol->wolopts = 0;
4900 }
4901 memset(&wol->sopass, 0, sizeof(wol->sopass));
4902}
4903
4904static int
4905bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4906{
Michael Chan972ec0d2006-01-23 16:12:43 -08004907 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004908
4909 if (wol->wolopts & ~WAKE_MAGIC)
4910 return -EINVAL;
4911
4912 if (wol->wolopts & WAKE_MAGIC) {
4913 if (bp->flags & NO_WOL_FLAG)
4914 return -EINVAL;
4915
4916 bp->wol = 1;
4917 }
4918 else {
4919 bp->wol = 0;
4920 }
4921 return 0;
4922}
4923
/* ethtool -r: restart autonegotiation.
 *
 * Returns -EINVAL if autoneg is not currently enabled.  For SerDes
 * interfaces the link is first forced down (via PHY loopback) so the
 * link partner sees a transition, and the SerDes autoneg timer is
 * re-armed before autoneg is restarted.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the BH lock across the sleep, then retake it. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the SerDes autoneg timeout state machine. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4958
4959static int
4960bnx2_get_eeprom_len(struct net_device *dev)
4961{
Michael Chan972ec0d2006-01-23 16:12:43 -08004962 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004963
Michael Chan1122db72006-01-23 16:11:42 -08004964 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004965 return 0;
4966
Michael Chan1122db72006-01-23 16:11:42 -08004967 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004968}
4969
4970static int
4971bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4972 u8 *eebuf)
4973{
Michael Chan972ec0d2006-01-23 16:12:43 -08004974 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004975 int rc;
4976
John W. Linville1064e942005-11-10 12:58:24 -08004977 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004978
4979 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4980
4981 return rc;
4982}
4983
4984static int
4985bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4986 u8 *eebuf)
4987{
Michael Chan972ec0d2006-01-23 16:12:43 -08004988 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004989 int rc;
4990
John W. Linville1064e942005-11-10 12:58:24 -08004991 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004992
4993 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4994
4995 return rc;
4996}
4997
4998static int
4999bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5000{
Michael Chan972ec0d2006-01-23 16:12:43 -08005001 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005002
5003 memset(coal, 0, sizeof(struct ethtool_coalesce));
5004
5005 coal->rx_coalesce_usecs = bp->rx_ticks;
5006 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5007 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5008 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5009
5010 coal->tx_coalesce_usecs = bp->tx_ticks;
5011 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5012 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5013 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5014
5015 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5016
5017 return 0;
5018}
5019
5020static int
5021bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5022{
Michael Chan972ec0d2006-01-23 16:12:43 -08005023 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005024
5025 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5026 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5027
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005028 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005029 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5030
5031 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5032 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5033
5034 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5035 if (bp->rx_quick_cons_trip_int > 0xff)
5036 bp->rx_quick_cons_trip_int = 0xff;
5037
5038 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5039 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5040
5041 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5042 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5043
5044 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5045 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5046
5047 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5048 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5049 0xff;
5050
5051 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5052 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5053 bp->stats_ticks &= 0xffff00;
5054
5055 if (netif_running(bp->dev)) {
5056 bnx2_netif_stop(bp);
5057 bnx2_init_nic(bp);
5058 bnx2_netif_start(bp);
5059 }
5060
5061 return 0;
5062}
5063
5064static void
5065bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5066{
Michael Chan972ec0d2006-01-23 16:12:43 -08005067 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005068
Michael Chan13daffa2006-03-20 17:49:20 -08005069 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005070 ering->rx_mini_max_pending = 0;
5071 ering->rx_jumbo_max_pending = 0;
5072
5073 ering->rx_pending = bp->rx_ring_size;
5074 ering->rx_mini_pending = 0;
5075 ering->rx_jumbo_pending = 0;
5076
5077 ering->tx_max_pending = MAX_TX_DESC_CNT;
5078 ering->tx_pending = bp->tx_ring_size;
5079}
5080
/* ethtool -G: resize the rx/tx rings.
 *
 * If the device is running, traffic is stopped and all ring memory is
 * freed and reallocated at the new sizes before restarting the NIC.
 *
 * NOTE(review): if bnx2_alloc_mem() fails below, the error is returned
 * with the interface stopped and its ring memory freed; recovery
 * appears to require bringing the interface down and up again -- TODO
 * confirm this is the intended failure semantic.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* tx ring must exceed MAX_SKB_FRAGS so a maximally fragmented
	 * skb can always be queued.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5114
5115static void
5116bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5117{
Michael Chan972ec0d2006-01-23 16:12:43 -08005118 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005119
5120 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5121 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5122 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5123}
5124
5125static int
5126bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5127{
Michael Chan972ec0d2006-01-23 16:12:43 -08005128 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005129
5130 bp->req_flow_ctrl = 0;
5131 if (epause->rx_pause)
5132 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5133 if (epause->tx_pause)
5134 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5135
5136 if (epause->autoneg) {
5137 bp->autoneg |= AUTONEG_FLOW_CTRL;
5138 }
5139 else {
5140 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5141 }
5142
Michael Chanc770a652005-08-25 15:38:39 -07005143 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005144
5145 bnx2_setup_phy(bp);
5146
Michael Chanc770a652005-08-25 15:38:39 -07005147 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005148
5149 return 0;
5150}
5151
/* ethtool: report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5159
/* ethtool: enable/disable rx checksum offload.  The flag is consulted
 * by the rx path; no hardware reprogramming is needed here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5168
/* ethtool -K tso on|off: toggle the TSO feature flags (plain TSO and
 * TSO with ECN) on the net device.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
5178
/* Number of counters exposed through ethtool -S.  Must match the
 * lengths of bnx2_stats_str_arr[], bnx2_stats_offset_arr[] and the
 * per-chip stats_len arrays below.
 */
#define BNX2_NUM_STATS 46

/* Counter names reported for ETH_SS_STATS, in the same order as
 * bnx2_stats_offset_arr[].
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5231
/* Convert a byte offset of a statistics_block field into a u32 index
 * usable with the u32 * view of the stats block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* u32 offset of each counter within the hardware statistics block,
 * parallel to bnx2_stats_str_arr[].  64-bit counters point at their
 * _hi word; the _lo word follows at the next index.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5282
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skip) for 5706 A0-A2 and
 * 5708 A0 chips.  Indexed in parallel with bnx2_stats_offset_arr[].
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Widths for later steppings; carrier-sense errors are readable here. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5301
/* Number of self-test results reported through ethtool. */
#define BNX2_NUM_TESTS 6

/* Test names reported for ETH_SS_TEST.  "offline" tests take the NIC
 * out of service; "online" tests run against the live device.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

/* ethtool: number of entries bnx2_self_test() writes into its buffer. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5320
/* ethtool self-test.  buf[i] is set nonzero when test i fails and
 * ETH_TEST_FL_FAILED is set in etest->flags.
 *
 * Offline tests (register/memory/loopback) stop traffic and reset the
 * chip into diagnostic mode; afterwards the NIC is reinitialized (or
 * left reset if the interface was down).  Online tests
 * (nvram/interrupt/link) always run.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback test returns a bitmask of failed loopbacks */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7s) so the link test below
		 * has a chance to pass after the restart.
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5376
5377static void
5378bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5379{
5380 switch (stringset) {
5381 case ETH_SS_STATS:
5382 memcpy(buf, bnx2_stats_str_arr,
5383 sizeof(bnx2_stats_str_arr));
5384 break;
5385 case ETH_SS_TEST:
5386 memcpy(buf, bnx2_tests_str_arr,
5387 sizeof(bnx2_tests_str_arr));
5388 break;
5389 }
5390}
5391
/* ethtool: number of counters bnx2_get_ethtool_stats() returns. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5397
/* ethtool -S: copy the hardware statistics block into buf[].
 *
 * Counter widths differ per chip: early 5706/5708 steppings mark some
 * counters width 0 (skipped due to errata -- see the stats_len arrays
 * above), the rest are 4- or 8-byte hardware counters.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* The stats block is only allocated while the device is open. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: _hi word at the offset, _lo word next */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5438
5439static int
5440bnx2_phys_id(struct net_device *dev, u32 data)
5441{
Michael Chan972ec0d2006-01-23 16:12:43 -08005442 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005443 int i;
5444 u32 save;
5445
5446 if (data == 0)
5447 data = 2;
5448
5449 save = REG_RD(bp, BNX2_MISC_CFG);
5450 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5451
5452 for (i = 0; i < (data * 2); i++) {
5453 if ((i % 2) == 0) {
5454 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5455 }
5456 else {
5457 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5458 BNX2_EMAC_LED_1000MB_OVERRIDE |
5459 BNX2_EMAC_LED_100MB_OVERRIDE |
5460 BNX2_EMAC_LED_10MB_OVERRIDE |
5461 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5462 BNX2_EMAC_LED_TRAFFIC);
5463 }
5464 msleep_interruptible(500);
5465 if (signal_pending(current))
5466 break;
5467 }
5468 REG_WR(bp, BNX2_EMAC_LED, 0);
5469 REG_WR(bp, BNX2_MISC_CFG, save);
5470 return 0;
5471}
5472
/* ethtool operation table.  TSO hooks are compiled in only when the
 * kernel provides TSO support (BCM_TSO, see top of file).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5510
/* MII register access ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held.  PHY accesses are serialized with
 * phy_lock; register writes additionally require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5552
5553/* Called with rtnl_lock */
5554static int
5555bnx2_change_mac_addr(struct net_device *dev, void *p)
5556{
5557 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005558 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005559
Michael Chan73eef4c2005-08-25 15:39:15 -07005560 if (!is_valid_ether_addr(addr->sa_data))
5561 return -EINVAL;
5562
Michael Chanb6016b72005-05-26 13:03:09 -07005563 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5564 if (netif_running(dev))
5565 bnx2_set_mac_addr(bp);
5566
5567 return 0;
5568}
5569
5570/* Called with rtnl_lock */
5571static int
5572bnx2_change_mtu(struct net_device *dev, int new_mtu)
5573{
Michael Chan972ec0d2006-01-23 16:12:43 -08005574 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005575
5576 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5577 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5578 return -EINVAL;
5579
5580 dev->mtu = new_mtu;
5581 if (netif_running(dev)) {
5582 bnx2_netif_stop(bp);
5583
5584 bnx2_init_nic(bp);
5585
5586 bnx2_netif_start(bp);
5587 }
5588 return 0;
5589}
5590
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the
 * device's IRQ disabled so the real handler cannot re-enter.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5602
5603static int __devinit
5604bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5605{
5606 struct bnx2 *bp;
5607 unsigned long mem_len;
5608 int rc;
5609 u32 reg;
5610
5611 SET_MODULE_OWNER(dev);
5612 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005613 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005614
5615 bp->flags = 0;
5616 bp->phy_flags = 0;
5617
5618 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5619 rc = pci_enable_device(pdev);
5620 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005621 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005622 goto err_out;
5623 }
5624
5625 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005626 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005627 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005628 rc = -ENODEV;
5629 goto err_out_disable;
5630 }
5631
5632 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5633 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005634 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005635 goto err_out_disable;
5636 }
5637
5638 pci_set_master(pdev);
5639
5640 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5641 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005642 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005643 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005644 rc = -EIO;
5645 goto err_out_release;
5646 }
5647
Michael Chanb6016b72005-05-26 13:03:09 -07005648 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5649 bp->flags |= USING_DAC_FLAG;
5650 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005651 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005652 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005653 rc = -EIO;
5654 goto err_out_release;
5655 }
5656 }
5657 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005658 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005659 rc = -EIO;
5660 goto err_out_release;
5661 }
5662
5663 bp->dev = dev;
5664 bp->pdev = pdev;
5665
5666 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005667 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5668
5669 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005670 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005671 dev->mem_end = dev->mem_start + mem_len;
5672 dev->irq = pdev->irq;
5673
5674 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5675
5676 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005677 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005678 rc = -ENOMEM;
5679 goto err_out_release;
5680 }
5681
5682 /* Configure byte swap and enable write to the reg_window registers.
5683 * Rely on CPU to do target byte swapping on big endian systems
5684 * The chip's target access swapping will not swap all accesses
5685 */
5686 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5687 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5688 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5689
Pavel Machek829ca9a2005-09-03 15:56:56 -07005690 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005691
5692 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5693
Michael Chan59b47d82006-11-19 14:10:45 -08005694 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5695 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5696 if (bp->pcix_cap == 0) {
5697 dev_err(&pdev->dev,
5698 "Cannot find PCIX capability, aborting.\n");
5699 rc = -EIO;
5700 goto err_out_unmap;
5701 }
5702 }
5703
Michael Chanb6016b72005-05-26 13:03:09 -07005704 /* Get bus information. */
5705 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5706 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5707 u32 clkreg;
5708
5709 bp->flags |= PCIX_FLAG;
5710
5711 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005712
Michael Chanb6016b72005-05-26 13:03:09 -07005713 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5714 switch (clkreg) {
5715 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5716 bp->bus_speed_mhz = 133;
5717 break;
5718
5719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5720 bp->bus_speed_mhz = 100;
5721 break;
5722
5723 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5725 bp->bus_speed_mhz = 66;
5726 break;
5727
5728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5730 bp->bus_speed_mhz = 50;
5731 break;
5732
5733 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5734 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5735 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5736 bp->bus_speed_mhz = 33;
5737 break;
5738 }
5739 }
5740 else {
5741 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5742 bp->bus_speed_mhz = 66;
5743 else
5744 bp->bus_speed_mhz = 33;
5745 }
5746
5747 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5748 bp->flags |= PCI_32BIT_FLAG;
5749
5750 /* 5706A0 may falsely detect SERR and PERR. */
5751 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5752 reg = REG_RD(bp, PCI_COMMAND);
5753 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5754 REG_WR(bp, PCI_COMMAND, reg);
5755 }
5756 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5757 !(bp->flags & PCIX_FLAG)) {
5758
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005759 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005760 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005761 goto err_out_unmap;
5762 }
5763
5764 bnx2_init_nvram(bp);
5765
Michael Chane3648b32005-11-04 08:51:21 -08005766 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5767
5768 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5769 BNX2_SHM_HDR_SIGNATURE_SIG)
5770 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5771 else
5772 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5773
Michael Chanb6016b72005-05-26 13:03:09 -07005774 /* Get the permanent MAC address. First we need to make sure the
5775 * firmware is actually running.
5776 */
Michael Chane3648b32005-11-04 08:51:21 -08005777 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005778
5779 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5780 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005781 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005782 rc = -ENODEV;
5783 goto err_out_unmap;
5784 }
5785
Michael Chane3648b32005-11-04 08:51:21 -08005786 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005787
Michael Chane3648b32005-11-04 08:51:21 -08005788 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005789 bp->mac_addr[0] = (u8) (reg >> 8);
5790 bp->mac_addr[1] = (u8) reg;
5791
Michael Chane3648b32005-11-04 08:51:21 -08005792 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005793 bp->mac_addr[2] = (u8) (reg >> 24);
5794 bp->mac_addr[3] = (u8) (reg >> 16);
5795 bp->mac_addr[4] = (u8) (reg >> 8);
5796 bp->mac_addr[5] = (u8) reg;
5797
5798 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005799 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005800
5801 bp->rx_csum = 1;
5802
5803 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5804
5805 bp->tx_quick_cons_trip_int = 20;
5806 bp->tx_quick_cons_trip = 20;
5807 bp->tx_ticks_int = 80;
5808 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005809
Michael Chanb6016b72005-05-26 13:03:09 -07005810 bp->rx_quick_cons_trip_int = 6;
5811 bp->rx_quick_cons_trip = 6;
5812 bp->rx_ticks_int = 18;
5813 bp->rx_ticks = 18;
5814
5815 bp->stats_ticks = 1000000 & 0xffff00;
5816
5817 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005818 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005819
Michael Chan5b0c76a2005-11-04 08:45:49 -08005820 bp->phy_addr = 1;
5821
Michael Chanb6016b72005-05-26 13:03:09 -07005822 /* Disable WOL support if we are running on a SERDES chip. */
5823 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5824 bp->phy_flags |= PHY_SERDES_FLAG;
5825 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005826 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5827 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005828 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005829 BNX2_SHARED_HW_CFG_CONFIG);
5830 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5831 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5832 }
Michael Chanb6016b72005-05-26 13:03:09 -07005833 }
5834
Michael Chan16088272006-06-12 22:16:43 -07005835 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5836 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5837 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005838 bp->flags |= NO_WOL_FLAG;
5839
Michael Chanb6016b72005-05-26 13:03:09 -07005840 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5841 bp->tx_quick_cons_trip_int =
5842 bp->tx_quick_cons_trip;
5843 bp->tx_ticks_int = bp->tx_ticks;
5844 bp->rx_quick_cons_trip_int =
5845 bp->rx_quick_cons_trip;
5846 bp->rx_ticks_int = bp->rx_ticks;
5847 bp->comp_prod_trip_int = bp->comp_prod_trip;
5848 bp->com_ticks_int = bp->com_ticks;
5849 bp->cmd_ticks_int = bp->cmd_ticks;
5850 }
5851
Michael Chanf9317a42006-09-29 17:06:23 -07005852 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5853 *
5854 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5855 * with byte enables disabled on the unused 32-bit word. This is legal
5856 * but causes problems on the AMD 8132 which will eventually stop
5857 * responding after a while.
5858 *
5859 * AMD believes this incompatibility is unique to the 5706, and
5860 * prefers to locally disable MSI rather than globally disabling it
5861 * using pci_msi_quirk.
5862 */
5863 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5864 struct pci_dev *amd_8132 = NULL;
5865
5866 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5867 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5868 amd_8132))) {
5869 u8 rev;
5870
5871 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5872 if (rev >= 0x10 && rev <= 0x13) {
5873 disable_msi = 1;
5874 pci_dev_put(amd_8132);
5875 break;
5876 }
5877 }
5878 }
5879
Michael Chanb6016b72005-05-26 13:03:09 -07005880 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5881 bp->req_line_speed = 0;
5882 if (bp->phy_flags & PHY_SERDES_FLAG) {
5883 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005884
Michael Chane3648b32005-11-04 08:51:21 -08005885 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005886 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5887 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5888 bp->autoneg = 0;
5889 bp->req_line_speed = bp->line_speed = SPEED_1000;
5890 bp->req_duplex = DUPLEX_FULL;
5891 }
Michael Chanb6016b72005-05-26 13:03:09 -07005892 }
5893 else {
5894 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5895 }
5896
5897 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5898
Michael Chancd339a02005-08-25 15:35:24 -07005899 init_timer(&bp->timer);
5900 bp->timer.expires = RUN_AT(bp->timer_interval);
5901 bp->timer.data = (unsigned long) bp;
5902 bp->timer.function = bnx2_timer;
5903
Michael Chanb6016b72005-05-26 13:03:09 -07005904 return 0;
5905
5906err_out_unmap:
5907 if (bp->regview) {
5908 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005909 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005910 }
5911
5912err_out_release:
5913 pci_release_regions(pdev);
5914
5915err_out_disable:
5916 pci_disable_device(pdev);
5917 pci_set_drvdata(pdev, NULL);
5918
5919err_out:
5920 return rc;
5921}
5922
5923static int __devinit
5924bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5925{
5926 static int version_printed = 0;
5927 struct net_device *dev = NULL;
5928 struct bnx2 *bp;
5929 int rc, i;
5930
5931 if (version_printed++ == 0)
5932 printk(KERN_INFO "%s", version);
5933
5934 /* dev zeroed in init_etherdev */
5935 dev = alloc_etherdev(sizeof(*bp));
5936
5937 if (!dev)
5938 return -ENOMEM;
5939
5940 rc = bnx2_init_board(pdev, dev);
5941 if (rc < 0) {
5942 free_netdev(dev);
5943 return rc;
5944 }
5945
5946 dev->open = bnx2_open;
5947 dev->hard_start_xmit = bnx2_start_xmit;
5948 dev->stop = bnx2_close;
5949 dev->get_stats = bnx2_get_stats;
5950 dev->set_multicast_list = bnx2_set_rx_mode;
5951 dev->do_ioctl = bnx2_ioctl;
5952 dev->set_mac_address = bnx2_change_mac_addr;
5953 dev->change_mtu = bnx2_change_mtu;
5954 dev->tx_timeout = bnx2_tx_timeout;
5955 dev->watchdog_timeo = TX_TIMEOUT;
5956#ifdef BCM_VLAN
5957 dev->vlan_rx_register = bnx2_vlan_rx_register;
5958 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5959#endif
5960 dev->poll = bnx2_poll;
5961 dev->ethtool_ops = &bnx2_ethtool_ops;
5962 dev->weight = 64;
5963
Michael Chan972ec0d2006-01-23 16:12:43 -08005964 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005965
5966#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5967 dev->poll_controller = poll_bnx2;
5968#endif
5969
5970 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005971 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005972 if (bp->regview)
5973 iounmap(bp->regview);
5974 pci_release_regions(pdev);
5975 pci_disable_device(pdev);
5976 pci_set_drvdata(pdev, NULL);
5977 free_netdev(dev);
5978 return rc;
5979 }
5980
5981 pci_set_drvdata(pdev, dev);
5982
5983 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005984 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005985 bp->name = board_info[ent->driver_data].name,
5986 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5987 "IRQ %d, ",
5988 dev->name,
5989 bp->name,
5990 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5991 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5992 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5993 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5994 bp->bus_speed_mhz,
5995 dev->base_addr,
5996 bp->pdev->irq);
5997
5998 printk("node addr ");
5999 for (i = 0; i < 6; i++)
6000 printk("%2.2x", dev->dev_addr[i]);
6001 printk("\n");
6002
6003 dev->features |= NETIF_F_SG;
6004 if (bp->flags & USING_DAC_FLAG)
6005 dev->features |= NETIF_F_HIGHDMA;
6006 dev->features |= NETIF_F_IP_CSUM;
6007#ifdef BCM_VLAN
6008 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6009#endif
6010#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006011 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006012#endif
6013
6014 netif_carrier_off(bp->dev);
6015
6016 return 0;
6017}
6018
6019static void __devexit
6020bnx2_remove_one(struct pci_dev *pdev)
6021{
6022 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006023 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006024
Michael Chanafdc08b2005-08-25 15:34:29 -07006025 flush_scheduled_work();
6026
Michael Chanb6016b72005-05-26 13:03:09 -07006027 unregister_netdev(dev);
6028
6029 if (bp->regview)
6030 iounmap(bp->regview);
6031
6032 free_netdev(dev);
6033 pci_release_regions(pdev);
6034 pci_disable_device(pdev);
6035 pci_set_drvdata(pdev, NULL);
6036}
6037
6038static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006039bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006040{
6041 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006042 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006043 u32 reset_code;
6044
6045 if (!netif_running(dev))
6046 return 0;
6047
Michael Chan1d60290f2006-03-20 17:50:08 -08006048 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006049 bnx2_netif_stop(bp);
6050 netif_device_detach(dev);
6051 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006052 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006053 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006054 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006055 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6056 else
6057 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6058 bnx2_reset_chip(bp, reset_code);
6059 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006060 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006061 return 0;
6062}
6063
6064static int
6065bnx2_resume(struct pci_dev *pdev)
6066{
6067 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006068 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006069
6070 if (!netif_running(dev))
6071 return 0;
6072
Pavel Machek829ca9a2005-09-03 15:56:56 -07006073 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006074 netif_device_attach(dev);
6075 bnx2_init_nic(bp);
6076 bnx2_netif_start(bp);
6077 return 0;
6078}
6079
6080static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006081 .name = DRV_MODULE_NAME,
6082 .id_table = bnx2_pci_tbl,
6083 .probe = bnx2_init_one,
6084 .remove = __devexit_p(bnx2_remove_one),
6085 .suspend = bnx2_suspend,
6086 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006087};
6088
6089static int __init bnx2_init(void)
6090{
Jeff Garzik29917622006-08-19 17:48:59 -04006091 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006092}
6093
6094static void __exit bnx2_cleanup(void)
6095{
6096 pci_unregister_driver(&bnx2_pci_driver);
6097}
6098
6099module_init(bnx2_init);
6100module_exit(bnx2_cleanup);
6101
6102
6103