blob: ea5daf6efa097986090da2f89d9ff12181810b1a [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
56
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
Michael Chanf9317a42006-09-29 17:06:23 -070059#define DRV_MODULE_VERSION "1.4.45"
60#define DRV_MODULE_RELDATE "September 29, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070061
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
Randy Dunlape19360f2006-04-10 23:22:06 -070067static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070068 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080071MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070072MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080086 BCM5708,
87 BCM5708S,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
/* Human-readable board names, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI IDs this driver binds to.  HP OEM (subvendor/subdevice) entries must
 * precede the generic PCI_ANY_ID entries so they match first; the last
 * field is the board_t used to look up board_info[]. */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* Table of supported NVRAM/flash parts.  Field layout follows struct
 * flash_spec in bnx2.h (magic-value fields are hardware strapping/config
 * register values — see bnx2.h for their exact meaning; TODO confirm).
 * "Expansion entry" rows are placeholders for strapping codes with no
 * known part assigned yet (total size 0). */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
207
208MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Number of free TX descriptors.  The smp_mb() orders the reads of
 * tx_prod/tx_cons against updates made on other CPUs (producer in the
 * xmit path, consumer in the completion path). */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	/* Ring indices wrap; normalize the unsigned difference when the
	 * producer has wrapped past the consumer. */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
220
/* Indirect register read: select the target offset through the PCICFG
 * register window, then read the windowed data register.  Callers must
 * provide their own serialization (the window is shared state). */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write: counterpart of bnx2_reg_rd_ind() — set the
 * window address, then write the value through the window. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
/* Write one 32-bit word into on-chip context memory at cid_addr+offset
 * via the CTX_DATA address/data register pair. */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
242
/* Read a PHY register over MDIO.
 *
 * Temporarily disables hardware auto-polling of the PHY (if enabled),
 * issues a read command on the EMAC MDIO interface, busy-waits up to
 * ~500us for completion, then restores auto-polling.
 *
 * Returns 0 with *val set on success, -EBUSY (and *val = 0) if the
 * MDIO transaction never completed.  Caller must hold phy_lock
 * (presumably — the shared MDIO_COMM register is unprotected here;
 * TODO confirm against callers).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause auto-polling so our command doesn't collide. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Compose and fire the MDIO read: PHY address, register, read op. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data bits, mask off status. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling that we disabled above. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
299
/* Write a PHY register over MDIO.
 *
 * Mirror of bnx2_read_phy(): pauses auto-polling, issues a write
 * command carrying @val in the data bits, polls for completion, then
 * restores auto-polling.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause auto-polling so our command doesn't collide. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for the START_BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling that we disabled above. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
348
/* Mask chip interrupts.  The trailing read flushes the posted write so
 * the mask takes effect before we return. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
356
/* Unmask chip interrupts.  Two-step ack: first acknowledge up to
 * last_status_idx with interrupts still masked, then unmask; finally
 * kick the host coalescing block (COAL_NOW) so a pending status-block
 * update generates an interrupt immediately rather than waiting for
 * the next coalescing event. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
369
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path can see interrupts are
 * logically off; bnx2_netif_start() decrements it. */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
377
/* Quiesce the interface: synchronously disable interrupts, then stop
 * NAPI polling and the TX queue.  Counterpart of bnx2_netif_start(). */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
388
/* Re-enable the interface stopped by bnx2_netif_stop().  Stop/start
 * calls nest via intr_sem; only the final start (sem reaching zero)
 * actually wakes the queue and re-enables interrupts. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
400
/* Release all ring/status DMA memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially-allocated state (every pointer is
 * NULL-checked or freed with a NULL-tolerant free) — bnx2_alloc_mem()
 * relies on this for its error path. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		/* Status and statistics blocks share one allocation;
		 * freeing status_blk frees both. */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
431
/* Allocate all TX/RX ring buffers plus the combined status/statistics
 * DMA block.  On any failure, frees whatever was already allocated via
 * bnx2_free_mem() and returns -ENOMEM; returns 0 on success. */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx_buf_ring can span multiple pages; vmalloc + explicit memset
	 * (no vzalloc in this kernel). */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives immediately after the cache-aligned status
	 * block, in both CPU and DMA address space. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
490
/* Publish the current link state (speed/duplex/autoneg status) to the
 * bootcode via the shared-memory BNX2_LINK_STATUS word, so firmware and
 * management agents see the same link info the driver does. */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches; read twice for current state. */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
546
/* Log the link state to the kernel log, update the carrier flag on the
 * netdevice, and mirror the state to firmware shared memory. */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
581
/* Resolve the effective pause/flow-control configuration into
 * bp->flow_ctrl (FLOW_CTRL_TX/RX bits).
 *
 * If flow control is not being autonegotiated, the requested setting is
 * used directly (full duplex only).  On 5708 SerDes the hardware
 * reports the resolved result itself; otherwise the local and partner
 * pause advertisements are combined per the 802.3 resolution table.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes exposes the resolved pause result directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Translate 1000Base-X pause bits into the copper
		 * ADVERTISE_PAUSE_* encoding so one resolution table
		 * below serves both media types. */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
657
/* Link-up handler for the 5708 SerDes PHY: decode negotiated speed and
 * duplex from the BCM5708S 1000X status register into bp->line_speed /
 * bp->duplex.  Always returns 0. */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
686
/* Link-up handler for the 5706 SerDes PHY.  SerDes link is always
 * 1000 Mbps; duplex comes from BMCR when forced, or from the common
 * (local AND partner) 1000Base-X advertisement when autonegotiated.
 * Always returns 0. */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
723
/* Link-up handler for copper PHYs: derive bp->line_speed/bp->duplex.
 * With autoneg enabled, prefers the highest common capability —
 * checking 1000Base-T (CTRL1000 vs STAT1000) first, then falling back
 * to the 10/100 advertisement registers; if nothing matches, declares
 * the link down.  Without autoneg, decodes the forced BMCR settings.
 * Always returns 0. */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 partner bits sit 2 positions above the
		 * corresponding CTRL1000 advertisement bits. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability — treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
789
/* Program the EMAC to match the resolved link parameters: TX IPG/slot
 * timings, port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * Finishes by acking the link-change interrupt.  Always returns 0. */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x2620/0x26ff are TX_LENGTHS timing values; the larger value is
	 * needed for 1000 Mbps half duplex (magic per hardware spec —
	 * TODO confirm field breakdown against the register manual). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
856
/* Central link-state handler: sample the PHY, update bp->link_up and
 * the speed/duplex/flow-control state via the per-PHY linkup helpers,
 * log any change, and reprogram the MAC.  Always returns 0. */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status latches low; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: trust the EMAC link bit over BMSR. */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link lost on autoneg SerDes: drop any forced
			 * 2.5G mode and make sure autoneg is enabled. */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
923
/* Soft-reset the PHY via BMCR and poll (up to ~1ms) for the
 * self-clearing RESET bit to drop.  Returns 0 on success, -EBUSY if
 * the PHY never came out of reset. */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
947
948static u32
949bnx2_phy_get_pause_adv(struct bnx2 *bp)
950{
951 u32 adv = 0;
952
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
958 }
959 else {
960 adv = ADVERTISE_PAUSE_CAP;
961 }
962 }
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
966 }
967 else {
968 adv = ADVERTISE_PAUSE_ASYM;
969 }
970 }
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974 }
975 else {
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
977 }
978 }
979 return adv;
980}
981
/* Configure the SerDes (fiber) PHY according to bp->autoneg,
 * bp->req_line_speed / bp->req_duplex and bp->advertising.
 * Called with bp->phy_lock held (it temporarily drops the lock
 * around msleep() in the autoneg path).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly instead of
		 * advertising.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* 2.5G is forced via a 5708S-specific BMCR bit plus
			 * the UP1 2.5G enable; toggling UP1 requires a link
			 * bounce so the partner renegotiates.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Leaving 2.5G mode on a 5708: clear the UP1 bit
			 * and bounce the link as above.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly restart autoneg with nothing
				 * advertised so the partner sees the link
				 * drop before the forced mode is applied.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Make sure 2.5G can be negotiated. */
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Drop phy_lock so we may sleep; callers hold it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1085
1086#define ETHTOOL_ALL_FIBRE_SPEED \
1087 (ADVERTISED_1000baseT_Full)
1088
1089#define ETHTOOL_ALL_COPPER_SPEED \
1090 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1091 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1092 ADVERTISED_1000baseT_Full)
1093
1094#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1095 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001096
Michael Chanb6016b72005-05-26 13:03:09 -07001097#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1098
/* Configure the copper PHY: either program the autoneg advertisement
 * registers, or force speed/duplex through BMCR.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Read current advertisements, masking to the bits we
		 * manage, so an unchanged config skips the AN restart.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool ADVERTISED_* bits into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Poll (up to ~62 ms) until the PHY reports
			 * link down so the partner notices the change.
			 */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1193
1194static int
1195bnx2_setup_phy(struct bnx2 *bp)
1196{
1197 if (bp->loopback == MAC_LOOPBACK)
1198 return 0;
1199
1200 if (bp->phy_flags & PHY_SERDES_FLAG) {
1201 return (bnx2_setup_serdes_phy(bp));
1202 }
1203 else {
1204 return (bnx2_setup_copper_phy(bp));
1205 }
1206}
1207
/* One-time initialization of the BCM5708S SerDes PHY: select IEEE
 * register mapping, enable fiber mode / auto-detect, optionally enable
 * 2.5G, and apply board-specific TX amplitude tuning from shared
 * hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the DIG3 block and force IEEE register mapping, then
	 * return to the DIG block for the rest of the setup.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude (errata for early 5708
		 * silicon revisions)
		 */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from firmware shared memory;
	 * only applied on backplane (blade) designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1261
/* One-time initialization of the BCM5706S SerDes PHY.  Adjusts
 * undocumented PHY shadow registers (0x18/0x1c) for jumbo vs standard
 * MTU.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* NOTE(review): magic chip workaround; exact meaning of
		 * MISC_UNUSED0 = 0x300 is not documented here.
		 */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1297
/* One-time initialization of the copper PHY: apply the CRC workaround
 * expansion-register sequence, configure extended packet length for
 * jumbo MTU, and enable ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): the flag is set unconditionally just above the
	 * test, so the workaround below always runs; the if() only
	 * preserves the option of making it conditional later.
	 */
	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* CRC workaround: vendor-specified writes to the PHY
		 * expansion registers (0x17/0x15 address/data pairs).
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed: fall back to a lower speed when the cable
	 * cannot support the advertised rate.
	 */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1340
1341
/* Top-level PHY initialization: enable link attentions, reset the PHY,
 * read and store its ID, run the chip-specific init routine, and then
 * apply the current speed/duplex/autoneg settings.
 * Returns the status of the chip-specific init (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Interrupt on link-ready changes. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID is split across the two PHYSID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1374
1375static int
1376bnx2_set_mac_loopback(struct bnx2 *bp)
1377{
1378 u32 mac_mode;
1379
1380 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1381 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1382 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1383 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1384 bp->link_up = 1;
1385 return 0;
1386}
1387
Michael Chanbc5a0692006-01-23 16:13:22 -08001388static int bnx2_test_link(struct bnx2 *);
1389
/* Put the PHY into loopback at 1000/full, wait (up to ~1 s) for the
 * link to come up, then configure the EMAC port mode to match.
 * Returns 0 on success or the PHY write error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; loopback link normally comes up quickly. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/force bits in the MAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1419
Michael Chanb6016b72005-05-26 13:03:09 -07001420static int
Michael Chanb090ae22006-01-23 16:07:10 -08001421bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001422{
1423 int i;
1424 u32 val;
1425
Michael Chanb6016b72005-05-26 13:03:09 -07001426 bp->fw_wr_seq++;
1427 msg_data |= bp->fw_wr_seq;
1428
Michael Chane3648b32005-11-04 08:51:21 -08001429 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001430
1431 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001432 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1433 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001434
Michael Chane3648b32005-11-04 08:51:21 -08001435 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001436
1437 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1438 break;
1439 }
Michael Chanb090ae22006-01-23 16:07:10 -08001440 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1441 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001442
1443 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001444 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1445 if (!silent)
1446 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1447 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001448
1449 msg_data &= ~BNX2_DRV_MSG_CODE;
1450 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1451
Michael Chane3648b32005-11-04 08:51:21 -08001452 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001453
Michael Chanb6016b72005-05-26 13:03:09 -07001454 return -EBUSY;
1455 }
1456
Michael Chanb090ae22006-01-23 16:07:10 -08001457 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1458 return -EIO;
1459
Michael Chanb6016b72005-05-26 13:03:09 -07001460 return 0;
1461}
1462
1463static void
1464bnx2_init_context(struct bnx2 *bp)
1465{
1466 u32 vcid;
1467
1468 vcid = 96;
1469 while (vcid) {
1470 u32 vcid_addr, pcid_addr, offset;
1471
1472 vcid--;
1473
1474 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1475 u32 new_vcid;
1476
1477 vcid_addr = GET_PCID_ADDR(vcid);
1478 if (vcid & 0x8) {
1479 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1480 }
1481 else {
1482 new_vcid = vcid;
1483 }
1484 pcid_addr = GET_PCID_ADDR(new_vcid);
1485 }
1486 else {
1487 vcid_addr = GET_CID_ADDR(vcid);
1488 pcid_addr = vcid_addr;
1489 }
1490
1491 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1492 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1493
1494 /* Zero out the context. */
1495 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1496 CTX_WR(bp, 0x00, offset, 0);
1497 }
1498
1499 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1500 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1501 }
1502}
1503
/* Hardware errata workaround: allocate every free RX mbuf from the
 * chip, remember the good ones (bit 9 clear in the returned address),
 * and free only those back — permanently retiring the bad memory
 * blocks.  Returns 0 on success, -ENOMEM if the scratch array cannot
 * be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough for every mbuf the chip can hand out
	 * here — presumably the free pool is smaller than that; the
	 * loop below is bounded by the chip's free count, not 512.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by BNX2_RBUF_FW_BUF_FREE: the mbuf
		 * value replicated in the high bits plus a valid bit.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1554
1555static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001556bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001557{
1558 u32 val;
1559 u8 *mac_addr = bp->dev->dev_addr;
1560
1561 val = (mac_addr[0] << 8) | mac_addr[1];
1562
1563 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1564
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001565 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001566 (mac_addr[4] << 8) | mac_addr[5];
1567
1568 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1569}
1570
/* Allocate and DMA-map a new receive skb for ring slot @index, fill in
 * the corresponding rx_bd with the mapping, and advance the producer
 * byte-sequence counter.  Returns 0 on success, -ENOMEM if the skb
 * allocation fails (the slot is left empty in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data buffer to an 8-byte boundary for DMA. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hardware descriptor takes the DMA address split into halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1602
1603static void
1604bnx2_phy_int(struct bnx2 *bp)
1605{
1606 u32 new_link_state, old_link_state;
1607
1608 new_link_state = bp->status_blk->status_attn_bits &
1609 STATUS_ATTN_BITS_LINK_STATE;
1610 old_link_state = bp->status_blk->status_attn_bits_ack &
1611 STATUS_ATTN_BITS_LINK_STATE;
1612 if (new_link_state != old_link_state) {
1613 if (new_link_state) {
1614 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1615 STATUS_ATTN_BITS_LINK_STATE);
1616 }
1617 else {
1618 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1619 STATUS_ATTN_BITS_LINK_STATE);
1620 }
1621 bnx2_set_link(bp);
1622 }
1623}
1624
/* Reclaim completed TX descriptors up to the hardware consumer index
 * from the status block: unmap the head and all fragment pages, free
 * the skbs, and wake the TX queue if it was stopped and enough
 * descriptors are now free.  Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The chip never reports a consumer index on the ring-boundary
	 * (next-pointer) entry; skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap; if the
			 * last BD of this packet is past hw_cons, the packet
			 * is not fully completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page in its own descriptor. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the consumer index: more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with
		 * bnx2_start_xmit() stopping the queue.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1712
/* Recycle an rx buffer: move the skb and its DMA mapping from consumer
 * slot @cons to producer slot @prod so the hardware can fill it again
 * (used when a packet is dropped or copied out).  When cons == prod
 * only the bseq counter and skb pointer need updating.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (header-sized) region back to the device after the
	 * CPU peeked at it.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	/* Transfer the DMA mapping and descriptor address to the
	 * producer slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1742
/* NAPI RX processing: walk completed rx descriptors up to the status
 * block's consumer index, validate each frame via its l2_fhdr, copy or
 * replace the skb, set checksum/VLAN info, and hand the packet to the
 * stack.  Processes at most @budget packets; returns the number
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the ring-boundary (next-pointer) descriptor index. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region; full unmap happens
		 * only if we keep the buffer.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status block to the frame;
		 * pkt_len includes the 4-byte FCS which we strip.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: keep this skb and pass it
			 * up after a full unmap.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Drop the frame and recycle the buffer. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Oversized non-VLAN (0x8100) frames are dropped here. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1892
1893/* MSI ISR - The only difference between this and the INTx ISR
1894 * is that the MSI interrupt is always serviced.
1895 */
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.  Masks further
 * interrupts and schedules NAPI polling.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Acknowledge and mask the interrupt until NAPI poll re-enables. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1915
/* Legacy INTx ISR.  Checks whether the interrupt is really ours (the
 * line may be shared), masks further interrupts, and schedules NAPI
 * polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Acknowledge and mask the interrupt until NAPI poll re-enables. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1945
Michael Chanf4e418f2005-11-04 08:53:48 -08001946static inline int
1947bnx2_has_work(struct bnx2 *bp)
1948{
1949 struct status_block *sblk = bp->status_blk;
1950
1951 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1952 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1953 return 1;
1954
1955 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1956 bp->link_up)
1957 return 1;
1958
1959 return 0;
1960}
1961
/* NAPI poll routine.  Handles link attention, TX completions and up to
 * *budget RX completions, then either completes polling and re-enables
 * interrupts (return 0) or asks to stay on the poll list (return 1).
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention: bits differ from their ack copy until serviced. */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* posted-write flush */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than this device's quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index before re-checking for work so a
	 * new event after this point re-raises the interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single unmasking ack is sufficient. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack with the interrupt still masked, then unmask
		 * with a second write.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2023
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag stripping) and the
 * RPM sort/multicast-hash registers from dev->flags and the multicast
 * list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware only when no VLAN group is
	 * registered and ASF management is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast frame: fill all hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 8 CRC bits select one of 256 filter bits:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2098
Michael Chanfba9fe92006-06-12 22:21:25 -07002099#define FW_BUF_SIZE 0x8000
2100
2101static int
2102bnx2_gunzip_init(struct bnx2 *bp)
2103{
2104 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2105 goto gunzip_nomem1;
2106
2107 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2108 goto gunzip_nomem2;
2109
2110 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2111 if (bp->strm->workspace == NULL)
2112 goto gunzip_nomem3;
2113
2114 return 0;
2115
2116gunzip_nomem3:
2117 kfree(bp->strm);
2118 bp->strm = NULL;
2119
2120gunzip_nomem2:
2121 vfree(bp->gunzip_buf);
2122 bp->gunzip_buf = NULL;
2123
2124gunzip_nomem1:
2125 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2126 "uncompression.\n", bp->dev->name);
2127 return -ENOMEM;
2128}
2129
2130static void
2131bnx2_gunzip_end(struct bnx2 *bp)
2132{
2133 kfree(bp->strm->workspace);
2134
2135 kfree(bp->strm);
2136 bp->strm = NULL;
2137
2138 if (bp->gunzip_buf) {
2139 vfree(bp->gunzip_buf);
2140 bp->gunzip_buf = NULL;
2141 }
2142}
2143
/* Decompress one gzip-wrapped firmware image into bp->gunzip_buf.
 *
 * @zbuf, @len: gzip stream (0x1f 0x8b magic, deflate method).
 * @outbuf, @outlen: on return, point at bp->gunzip_buf and the number
 *	of decompressed bytes (at most FW_BUF_SIZE).  The buffer is
 *	owned by bp and reused by the next call.
 *
 * Returns 0 on success, -EINVAL for a bad gzip header, or a zlib
 * error code from inflate.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the optional NUL-terminated original-name field. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative windowBits: raw deflate data, no zlib header. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2184
/* Download microcode into one of the two RV2P (receive) processors.
 *
 * @rv2p_code: decompressed image, consumed as pairs of 32-bit words
 *	(one 64-bit instruction per 8 bytes).
 * @rv2p_code_len: image length in bytes.
 * @rv2p_proc: RV2P_PROC1 or RV2P_PROC2, selects the target engine.
 *
 * The processor is left in reset; it is un-stalled later during chip
 * initialization.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction word to slot i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2217
/* Halt one on-chip RISC CPU, copy every firmware section (text, data,
 * sbss, bss, rodata) into its scratchpad through the indirect register
 * window, set the program counter to the start address, and restart it.
 *
 * NOTE(review): only the text section goes through cpu_to_le32() here
 * (it comes from the gunzip output buffer); the remaining sections are
 * written as-is from the firmware arrays — confirm this is the
 * intended endianness handling on big-endian hosts.
 */
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
2291
/* Load and start all on-chip processors: the two RV2P engines plus the
 * RX, TX, TX patch-up (TPAT), and completion (COM) CPUs.  Each firmware
 * image is stored gzip-compressed and is decompressed one at a time
 * through the shared gunzip buffer, which is allocated here and freed
 * on exit (success or failure).  Returns 0 or a gunzip error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is compressed; decompress it into the
	 * shared buffer before handing the image to load_cpu_fw.
	 */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

init_cpu_err:
	/* Free the gunzip scratch state on both success and failure. */
	bnx2_gunzip_end(bp);
	return rc;
}
2531
/* Move the chip between PCI power states.
 *
 * PCI_D0: clears the PMCSR state field (with the required delay when
 * coming out of D3hot) and restores normal EMAC/RPM configuration.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reprograms the MAC for 10/100
 * copper autoneg, enables magic/ACPI packet reception and the WoL sort
 * mode, notifies the firmware, and finally writes D3hot into PMCSR.
 *
 * Returns 0 on success, -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field and any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link
			 * works in the low-power state, then restore the
			 * user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 == D3hot in the PMCSR state-field encoding.
		 * NOTE(review): on 5706 A0/A1 the state is only set when
		 * WoL is armed — presumably a chip erratum; confirm.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2658
2659static int
2660bnx2_acquire_nvram_lock(struct bnx2 *bp)
2661{
2662 u32 val;
2663 int j;
2664
2665 /* Request access to the flash interface. */
2666 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2667 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2668 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2669 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2670 break;
2671
2672 udelay(5);
2673 }
2674
2675 if (j >= NVRAM_TIMEOUT_COUNT)
2676 return -EBUSY;
2677
2678 return 0;
2679}
2680
2681static int
2682bnx2_release_nvram_lock(struct bnx2 *bp)
2683{
2684 int j;
2685 u32 val;
2686
2687 /* Relinquish nvram interface. */
2688 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2689
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2692 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2693 break;
2694
2695 udelay(5);
2696 }
2697
2698 if (j >= NVRAM_TIMEOUT_COUNT)
2699 return -EBUSY;
2700
2701 return 0;
2702}
2703
2704
/* Enable NVRAM writes.  For non-buffered flash parts this also issues
 * a WREN command to the device and polls for completion.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2733
2734static void
2735bnx2_disable_nvram_write(struct bnx2 *bp)
2736{
2737 u32 val;
2738
2739 val = REG_RD(bp, BNX2_MISC_CFG);
2740 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2741}
2742
2743
/* Grant the host access to the NVRAM interface (read and write enable
 * bits in NVM_ACCESS_ENABLE).
 */
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
2754
/* Revoke host access to the NVRAM interface (clears both the read and
 * write enable bits).
 */
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
2766
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts, which have no erase cycle.  Returns 0 on success or -EBUSY on
 * command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2806
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes,
 * stored in flash byte order via be32_to_cpu).  @cmd_flags carries the
 * FIRST/LAST framing bits for multi-word sequences.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by (page << page_bits) + byte. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2852
2853
/* Write one 32-bit word (@val, 4 bytes) to NVRAM at @offset.
 * @cmd_flags carries the FIRST/LAST framing bits for multi-word
 * sequences.  Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by (page << page_bits) + byte. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Data goes out in flash byte order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2897
/* Identify the attached flash/EEPROM part from the NVM_CFG1 strapping
 * and, if the interface has not been reconfigured yet, program the
 * NVM_CFG/WRITE1 registers for it.  Sets bp->flash_info and
 * bp->flash_size.  Returns 0 on success, -ENODEV for an unknown part,
 * or an NVRAM-lock error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field to match. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared firmware config;
	 * fall back to the table's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2975
2976static int
2977bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2978 int buf_size)
2979{
2980 int rc = 0;
2981 u32 cmd_flags, offset32, len32, extra;
2982
2983 if (buf_size == 0)
2984 return 0;
2985
2986 /* Request access to the flash interface. */
2987 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2988 return rc;
2989
2990 /* Enable access to flash interface */
2991 bnx2_enable_nvram_access(bp);
2992
2993 len32 = buf_size;
2994 offset32 = offset;
2995 extra = 0;
2996
2997 cmd_flags = 0;
2998
2999 if (offset32 & 3) {
3000 u8 buf[4];
3001 u32 pre_len;
3002
3003 offset32 &= ~3;
3004 pre_len = 4 - (offset & 3);
3005
3006 if (pre_len >= len32) {
3007 pre_len = len32;
3008 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3009 BNX2_NVM_COMMAND_LAST;
3010 }
3011 else {
3012 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3013 }
3014
3015 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3016
3017 if (rc)
3018 return rc;
3019
3020 memcpy(ret_buf, buf + (offset & 3), pre_len);
3021
3022 offset32 += 4;
3023 ret_buf += pre_len;
3024 len32 -= pre_len;
3025 }
3026 if (len32 & 3) {
3027 extra = 4 - (len32 & 3);
3028 len32 = (len32 + 4) & ~3;
3029 }
3030
3031 if (len32 == 4) {
3032 u8 buf[4];
3033
3034 if (cmd_flags)
3035 cmd_flags = BNX2_NVM_COMMAND_LAST;
3036 else
3037 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3038 BNX2_NVM_COMMAND_LAST;
3039
3040 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3041
3042 memcpy(ret_buf, buf, 4 - extra);
3043 }
3044 else if (len32 > 0) {
3045 u8 buf[4];
3046
3047 /* Read the first word. */
3048 if (cmd_flags)
3049 cmd_flags = 0;
3050 else
3051 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3052
3053 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3054
3055 /* Advance to the next dword. */
3056 offset32 += 4;
3057 ret_buf += 4;
3058 len32 -= 4;
3059
3060 while (len32 > 4 && rc == 0) {
3061 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3062
3063 /* Advance to the next dword. */
3064 offset32 += 4;
3065 ret_buf += 4;
3066 len32 -= 4;
3067 }
3068
3069 if (rc)
3070 return rc;
3071
3072 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3074
3075 memcpy(ret_buf, buf, 4 - extra);
3076 }
3077
3078 /* Disable access to flash interface */
3079 bnx2_disable_nvram_access(bp);
3080
3081 bnx2_release_nvram_lock(bp);
3082
3083 return rc;
3084}
3085
3086static int
3087bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3088 int buf_size)
3089{
3090 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07003091 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003092 int rc = 0;
3093 int align_start, align_end;
3094
3095 buf = data_buf;
3096 offset32 = offset;
3097 len32 = buf_size;
3098 align_start = align_end = 0;
3099
3100 if ((align_start = (offset32 & 3))) {
3101 offset32 &= ~3;
3102 len32 += align_start;
3103 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3104 return rc;
3105 }
3106
3107 if (len32 & 3) {
3108 if ((len32 > 4) || !align_start) {
3109 align_end = 4 - (len32 & 3);
3110 len32 += align_end;
3111 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3112 end, 4))) {
3113 return rc;
3114 }
3115 }
3116 }
3117
3118 if (align_start || align_end) {
3119 buf = kmalloc(len32, GFP_KERNEL);
3120 if (buf == 0)
3121 return -ENOMEM;
3122 if (align_start) {
3123 memcpy(buf, start, 4);
3124 }
3125 if (align_end) {
3126 memcpy(buf + len32 - 4, end, 4);
3127 }
3128 memcpy(buf + align_start, data_buf, buf_size);
3129 }
3130
Michael Chanae181bc2006-05-22 16:39:20 -07003131 if (bp->flash_info->buffered == 0) {
3132 flash_buffer = kmalloc(264, GFP_KERNEL);
3133 if (flash_buffer == NULL) {
3134 rc = -ENOMEM;
3135 goto nvram_write_end;
3136 }
3137 }
3138
Michael Chanb6016b72005-05-26 13:03:09 -07003139 written = 0;
3140 while ((written < len32) && (rc == 0)) {
3141 u32 page_start, page_end, data_start, data_end;
3142 u32 addr, cmd_flags;
3143 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003144
3145 /* Find the page_start addr */
3146 page_start = offset32 + written;
3147 page_start -= (page_start % bp->flash_info->page_size);
3148 /* Find the page_end addr */
3149 page_end = page_start + bp->flash_info->page_size;
3150 /* Find the data_start addr */
3151 data_start = (written == 0) ? offset32 : page_start;
3152 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003153 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003154 (offset32 + len32) : page_end;
3155
3156 /* Request access to the flash interface. */
3157 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3158 goto nvram_write_end;
3159
3160 /* Enable access to flash interface */
3161 bnx2_enable_nvram_access(bp);
3162
3163 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3164 if (bp->flash_info->buffered == 0) {
3165 int j;
3166
3167 /* Read the whole page into the buffer
3168 * (non-buffer flash only) */
3169 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3170 if (j == (bp->flash_info->page_size - 4)) {
3171 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3172 }
3173 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003174 page_start + j,
3175 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003176 cmd_flags);
3177
3178 if (rc)
3179 goto nvram_write_end;
3180
3181 cmd_flags = 0;
3182 }
3183 }
3184
3185 /* Enable writes to flash interface (unlock write-protect) */
3186 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3187 goto nvram_write_end;
3188
3189 /* Erase the page */
3190 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3191 goto nvram_write_end;
3192
3193 /* Re-enable the write again for the actual write */
3194 bnx2_enable_nvram_write(bp);
3195
3196 /* Loop to write back the buffer data from page_start to
3197 * data_start */
3198 i = 0;
3199 if (bp->flash_info->buffered == 0) {
3200 for (addr = page_start; addr < data_start;
3201 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003202
Michael Chanb6016b72005-05-26 13:03:09 -07003203 rc = bnx2_nvram_write_dword(bp, addr,
3204 &flash_buffer[i], cmd_flags);
3205
3206 if (rc != 0)
3207 goto nvram_write_end;
3208
3209 cmd_flags = 0;
3210 }
3211 }
3212
3213 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003214 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003215 if ((addr == page_end - 4) ||
3216 ((bp->flash_info->buffered) &&
3217 (addr == data_end - 4))) {
3218
3219 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3220 }
3221 rc = bnx2_nvram_write_dword(bp, addr, buf,
3222 cmd_flags);
3223
3224 if (rc != 0)
3225 goto nvram_write_end;
3226
3227 cmd_flags = 0;
3228 buf += 4;
3229 }
3230
3231 /* Loop to write back the buffer data from data_end
3232 * to page_end */
3233 if (bp->flash_info->buffered == 0) {
3234 for (addr = data_end; addr < page_end;
3235 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003236
Michael Chanb6016b72005-05-26 13:03:09 -07003237 if (addr == page_end-4) {
3238 cmd_flags = BNX2_NVM_COMMAND_LAST;
3239 }
3240 rc = bnx2_nvram_write_dword(bp, addr,
3241 &flash_buffer[i], cmd_flags);
3242
3243 if (rc != 0)
3244 goto nvram_write_end;
3245
3246 cmd_flags = 0;
3247 }
3248 }
3249
3250 /* Disable writes to flash interface (lock write-protect) */
3251 bnx2_disable_nvram_write(bp);
3252
3253 /* Disable access to flash interface */
3254 bnx2_disable_nvram_access(bp);
3255 bnx2_release_nvram_lock(bp);
3256
3257 /* Increment written */
3258 written += data_end - data_start;
3259 }
3260
3261nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003262 if (bp->flash_info->buffered == 0)
3263 kfree(flash_buffer);
3264
Michael Chanb6016b72005-05-26 13:03:09 -07003265 if (align_start || align_end)
3266 kfree(buf);
3267 return rc;
3268}
3269
/* Reset the chip core and wait for the on-chip firmware to finish its
 * re-initialization.  reset_code is the BNX2_DRV_MSG_CODE_* value
 * handed to the firmware via bnx2_fw_sync() before and after the reset.
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV if
 * the endian configuration readback is wrong, or the bnx2_fw_sync() /
 * bnx2_alloc_bad_rbuf() error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* NOTE(review): early 5706 steppings get a fixed 15ms sleep on top
	 * of the polling below -- presumably a silicon workaround; confirm
	 * against the errata before changing. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Reset request/busy bits still set after polling: give up. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3348
/* Program the chip after a reset: DMA/byte-swap configuration, on-chip
 * CPU firmware, NVRAM discovery, MAC address, MTU, host-coalescing
 * (interrupt mitigation) parameters, status/statistics block addresses,
 * and the receive filter.  Finishes by telling the firmware the reset
 * is complete and enabling the chip's functional blocks.
 * Returns 0 on success or the error from bnx2_init_cpus()/bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration: swap modes, number of read/write
	 * channels (control-path byte swap only on big-endian hosts). */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): bit 23 is only set for PCI-X at 133 MHz --
	 * presumably a bus-timing tweak; meaning not visible here. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single-DMA mode in the TX DMA block. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable-Relaxed-Ordering bit in the PCI-X
	 * command register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed for the rest of the init sequence. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	/* Download and start the on-chip processors' firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: kernel bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P block the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU. Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds and timers (low 16 bits = normal,
	 * high 16 bits = during-interrupt values). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */

	/* 5706 A1 cannot use the timer-mode coalescing configuration. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether firmware-based ASF management is enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the firmware the reset sequence is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining functional blocks and flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host-coalescing command register for later use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3510
3511
/* Initialize the TX BD ring: reset the software indices, chain the last
 * BD back to the start of the ring, and program the TX context (type
 * and BD ring DMA address) into the chip. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* Wake the TX queue once the ring has drained to half full. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last BD in the page points back at the base of the ring,
	 * forming a circular chain. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer bookkeeping. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Program the L2 TX context type and size. */
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Give the chip the DMA address of the TX BD ring. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3544
/* Initialize the RX BD ring pages, program the RX context into the
 * chip, pre-fill the ring with receive skbs, and publish the initial
 * producer index/byte sequence to the hardware. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	/* Reset producer/consumer bookkeeping. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill in every BD page; the last BD of each page links to the
	 * next page, and the final page links back to page 0. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the L2 RX context type and size. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Give the chip the DMA address of the first BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive skbs; stop early if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3604
3605static void
Michael Chan13daffa2006-03-20 17:49:20 -08003606bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3607{
3608 u32 num_rings, max;
3609
3610 bp->rx_ring_size = size;
3611 num_rings = 1;
3612 while (size > MAX_RX_DESC_CNT) {
3613 size -= MAX_RX_DESC_CNT;
3614 num_rings++;
3615 }
3616 /* round to next power of 2 */
3617 max = MAX_RX_RINGS;
3618 while ((max & num_rings) == 0)
3619 max >>= 1;
3620
3621 if (num_rings != max)
3622 max <<= 1;
3623
3624 bp->rx_max_ring = max;
3625 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3626}
3627
/* Unmap and free every skb still held by the TX ring (used during ring
 * teardown/reset).  Each skb occupies one BD for its linear data plus
 * one BD per paged fragment; the loop index skips over all of them. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* ...then each paged fragment in the following BDs. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Advance past the head BD and all fragment BDs. */
		i += j + 1;
	}

}
3664
3665static void
3666bnx2_free_rx_skbs(struct bnx2 *bp)
3667{
3668 int i;
3669
3670 if (bp->rx_buf_ring == NULL)
3671 return;
3672
Michael Chan13daffa2006-03-20 17:49:20 -08003673 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003674 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3675 struct sk_buff *skb = rx_buf->skb;
3676
Michael Chan05d0f1c2005-11-04 08:53:48 -08003677 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003678 continue;
3679
3680 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3681 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3682
3683 rx_buf->skb = NULL;
3684
Michael Chan745720e2006-06-29 12:37:41 -07003685 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003686 }
3687}
3688
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3695
/* Reset the chip, free all ring skbs (even if the reset itself failed),
 * then re-initialize the chip and both rings.  reset_code is forwarded
 * to bnx2_reset_chip().  Returns 0 on success or a negative errno. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free skbs unconditionally so nothing is leaked on failure. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3713
/* Full NIC bring-up: reset and re-initialize the chip and rings, then
 * initialize the PHY (under phy_lock) and update the link state.
 * Returns 0 on success or the bnx2_reset_nic() error. */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
3728
/* Self-test the chip's registers.  For each entry in reg_tbl, write 0
 * and then all-ones to the register, checking that read/write bits
 * (rw_mask) stick and read-only bits (ro_mask) are unaffected; the
 * original value is restored afterwards, including on failure.
 * Returns 0 if every register behaves, -ENODEV on the first mismatch.
 * NOTE(review): the 'flags' field is zero for all entries and unused in
 * the loop -- presumably reserved for future per-register options. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* Table of { offset, flags, writable-bit mask, read-only-bit mask },
	 * terminated by offset 0xffff. */
	static const struct {
		u16 offset;
		u16 flags;
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: writable bits must read back 0, read-only
		 * bits must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back as ones,
		 * read-only bits must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even when the test fails. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3891
3892static int
3893bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3894{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003895 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003896 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3897 int i;
3898
3899 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3900 u32 offset;
3901
3902 for (offset = 0; offset < size; offset += 4) {
3903
3904 REG_WR_IND(bp, start + offset, test_pattern[i]);
3905
3906 if (REG_RD_IND(bp, start + offset) !=
3907 test_pattern[i]) {
3908 return -ENODEV;
3909 }
3910 }
3911 }
3912 return 0;
3913}
3914
3915static int
3916bnx2_test_memory(struct bnx2 *bp)
3917{
3918 int ret = 0;
3919 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003920 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003921 u32 offset;
3922 u32 len;
3923 } mem_tbl[] = {
3924 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003925 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003926 { 0xe0000, 0x4000 },
3927 { 0x120000, 0x4000 },
3928 { 0x1a0000, 0x4000 },
3929 { 0x160000, 0x4000 },
3930 { 0xffffffff, 0 },
3931 };
3932
3933 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3934 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3935 mem_tbl[i].len)) != 0) {
3936 return ret;
3937 }
3938 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003939
Michael Chanb6016b72005-05-26 13:03:09 -07003940 return ret;
3941}
3942
/* Loopback mode selectors passed to bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1
3945
/* Run a single-packet loopback self-test in the given mode (MAC- or
 * PHY-level loopback).  Builds one 1514-byte frame addressed to our own
 * MAC, pushes it through the TX ring, and verifies that exactly one
 * error-free frame with the same payload arrives on the RX ring.
 *
 * Must be called with the device quiesced (no concurrent TX/RX); the
 * caller (bnx2_test_loopback) resets the NIC first.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * skb allocation fails, -ENODEV on any loopback failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Test frame: dest = our own MAC, zeroed src/type bytes, then an
	 * incrementing byte pattern that is checked on receive.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce (without interrupt) so the status block is
	 * up to date before sampling the RX consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Build one TX descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell: producer index, then byte sequence. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced coalesce so the TX/RX completion indices in the
	 * status block reflect the looped-back frame.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed exactly up to our producer index. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts (one) frame must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr the chip wrote sits at the head of the buffer,
	 * before the bp->rx_offset where the frame data begins.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte FCS. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4064
/* Bit flags returned by bnx2_test_loopback() identifying which
 * loopback path(s) failed.
 */
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)
4069
4070static int
4071bnx2_test_loopback(struct bnx2 *bp)
4072{
4073 int rc = 0;
4074
4075 if (!netif_running(bp->dev))
4076 return BNX2_LOOPBACK_FAILED;
4077
4078 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4079 spin_lock_bh(&bp->phy_lock);
4080 bnx2_init_phy(bp);
4081 spin_unlock_bh(&bp->phy_lock);
4082 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4083 rc |= BNX2_MAC_LOOPBACK_FAILED;
4084 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4085 rc |= BNX2_PHY_LOOPBACK_FAILED;
4086 return rc;
4087}
4088
/* bnx2_test_nvram(): bytes checksummed per pass, and the expected
 * residual of a CRC32 computed over data that includes its own CRC.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
4091
4092static int
4093bnx2_test_nvram(struct bnx2 *bp)
4094{
4095 u32 buf[NVRAM_SIZE / 4];
4096 u8 *data = (u8 *) buf;
4097 int rc = 0;
4098 u32 magic, csum;
4099
4100 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4101 goto test_nvram_done;
4102
4103 magic = be32_to_cpu(buf[0]);
4104 if (magic != 0x669955aa) {
4105 rc = -ENODEV;
4106 goto test_nvram_done;
4107 }
4108
4109 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4110 goto test_nvram_done;
4111
4112 csum = ether_crc_le(0x100, data);
4113 if (csum != CRC32_RESIDUAL) {
4114 rc = -ENODEV;
4115 goto test_nvram_done;
4116 }
4117
4118 csum = ether_crc_le(0x100, data + 0x100);
4119 if (csum != CRC32_RESIDUAL) {
4120 rc = -ENODEV;
4121 }
4122
4123test_nvram_done:
4124 return rc;
4125}
4126
4127static int
4128bnx2_test_link(struct bnx2 *bp)
4129{
4130 u32 bmsr;
4131
Michael Chanc770a652005-08-25 15:38:39 -07004132 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004133 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4134 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004135 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004136
Michael Chanb6016b72005-05-26 13:03:09 -07004137 if (bmsr & BMSR_LSTATUS) {
4138 return 0;
4139 }
4140 return -ENODEV;
4141}
4142
4143static int
4144bnx2_test_intr(struct bnx2 *bp)
4145{
4146 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004147 u16 status_idx;
4148
4149 if (!netif_running(bp->dev))
4150 return -ENODEV;
4151
4152 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4153
4154 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004155 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004156 REG_RD(bp, BNX2_HC_COMMAND);
4157
4158 for (i = 0; i < 10; i++) {
4159 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4160 status_idx) {
4161
4162 break;
4163 }
4164
4165 msleep_interruptible(10);
4166 }
4167 if (i < 10)
4168 return 0;
4169
4170 return -ENODEV;
4171}
4172
/* Periodic SerDes link maintenance for the 5706, called from
 * bnx2_timer().  Implements parallel detection: if autoneg is enabled
 * but the link stays down while the partner shows signal without
 * autoneg config, force 1000/full; once a forced link later sees
 * autoneg config from the partner, re-enable autoneg.
 * The 0x1c/0x17/0x15 accesses are vendor-specific PHY registers
 * (values appear to come from Broadcom documentation — not described
 * further here).
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg attempt is still pending; just count down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Sample vendor status registers; 0x15 is read
			 * twice (presumably to clear/refresh a latched
			 * value — TODO confirm against Broadcom docs).
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner has signal but no autoneg:
				 * force 1000/full (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link was forced by parallel detect; if the partner now
		 * sends autoneg config, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4227
/* Periodic SerDes link maintenance for the 5708, called from
 * bnx2_timer().  Only active on 2.5G-capable PHYs: while the link is
 * down with autoneg enabled, alternate between autonegotiation and a
 * forced 2.5G/full setting each time the pending countdown expires,
 * to try to bring up a partner that is not autonegotiating.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out the current attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not bring the link up: try a
			 * forced 2.5G/full setting for a short window.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too: go back to autoneg and
			 * wait two timer ticks before re-evaluating.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4262
/* Per-device periodic timer.  Sends the driver "pulse" to firmware,
 * refreshes the firmware RX drop counter in the stats block, and runs
 * the chip-specific SerDes link maintenance.  Reschedules itself with
 * bp->current_interval (which the serdes helpers may shorten).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep rescheduling) while interrupts are
	 * disabled via the intr_sem, e.g. during a reset.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to firmware so it knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4290
/* Called with rtnl_lock */
/* net_device open: power up, allocate rings/status blocks, request the
 * IRQ (preferring MSI on chips that support it), initialize the NIC,
 * and start the periodic timer and TX queue.  If MSI was enabled, the
 * interrupt path is verified with bnx2_test_intr() and the driver
 * falls back to legacy INTx when no MSI interrupt arrives.
 * Returns 0 on success or a negative errno, with all partially
 * acquired resources released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is avoided on 5706 A0/A1 and when the module parameter
	 * disable_msi is set; otherwise try MSI first.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind: IRQ, MSI, SKBs, ring memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init the NIC: the failed MSI test may have
			 * left it mid-coalesce.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4386
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop the netif,
 * re-initialize the NIC, and restart.  in_reset_task is polled by
 * bnx2_close() (which cannot flush the workqueue while holding
 * rtnl_lock) to wait for this to finish.  intr_sem is set to 1 so the
 * timer skips its work until the next interrupt re-arms it.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4404
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4413
4414#ifdef BCM_VLAN
4415/* Called with rtnl_lock */
/* Called with rtnl_lock.  Attach the VLAN group and reprogram the RX
 * mode (VLAN tag stripping depends on bp->vlgrp) with the netif
 * quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4428
/* Called with rtnl_lock */
/* Remove one VLAN id from the group and reprogram the RX mode with the
 * netif quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4443#endif
4444
Herbert Xu932ff272006-06-09 12:20:56 -07004445/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004446 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4447 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004448 */
4449static int
4450bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4451{
Michael Chan972ec0d2006-01-23 16:12:43 -08004452 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004453 dma_addr_t mapping;
4454 struct tx_bd *txbd;
4455 struct sw_bd *tx_buf;
4456 u32 len, vlan_tag_flags, last_frag, mss;
4457 u16 prod, ring_prod;
4458 int i;
4459
Michael Chane89bbf12005-08-25 15:36:58 -07004460 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004461 netif_stop_queue(dev);
4462 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4463 dev->name);
4464
4465 return NETDEV_TX_BUSY;
4466 }
4467 len = skb_headlen(skb);
4468 prod = bp->tx_prod;
4469 ring_prod = TX_RING_IDX(prod);
4470
4471 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004472 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004473 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4474 }
4475
4476 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4477 vlan_tag_flags |=
4478 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4479 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004480#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004481 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004482 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4483 u32 tcp_opt_len, ip_tcp_len;
4484
4485 if (skb_header_cloned(skb) &&
4486 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4487 dev_kfree_skb(skb);
4488 return NETDEV_TX_OK;
4489 }
4490
4491 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4492 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4493
4494 tcp_opt_len = 0;
4495 if (skb->h.th->doff > 5) {
4496 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4497 }
4498 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4499
4500 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004501 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004502 skb->h.th->check =
4503 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4504 skb->nh.iph->daddr,
4505 0, IPPROTO_TCP, 0);
4506
4507 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4508 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4509 (tcp_opt_len >> 2)) << 8;
4510 }
4511 }
4512 else
4513#endif
4514 {
4515 mss = 0;
4516 }
4517
4518 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004519
Michael Chanb6016b72005-05-26 13:03:09 -07004520 tx_buf = &bp->tx_buf_ring[ring_prod];
4521 tx_buf->skb = skb;
4522 pci_unmap_addr_set(tx_buf, mapping, mapping);
4523
4524 txbd = &bp->tx_desc_ring[ring_prod];
4525
4526 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4527 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4528 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4529 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4530
4531 last_frag = skb_shinfo(skb)->nr_frags;
4532
4533 for (i = 0; i < last_frag; i++) {
4534 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4535
4536 prod = NEXT_TX_BD(prod);
4537 ring_prod = TX_RING_IDX(prod);
4538 txbd = &bp->tx_desc_ring[ring_prod];
4539
4540 len = frag->size;
4541 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4542 len, PCI_DMA_TODEVICE);
4543 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4544 mapping, mapping);
4545
4546 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4547 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4548 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4549 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4550
4551 }
4552 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4553
4554 prod = NEXT_TX_BD(prod);
4555 bp->tx_prod_bseq += skb->len;
4556
Michael Chanb6016b72005-05-26 13:03:09 -07004557 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4558 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4559
4560 mmiowb();
4561
4562 bp->tx_prod = prod;
4563 dev->trans_start = jiffies;
4564
Michael Chane89bbf12005-08-25 15:36:58 -07004565 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004566 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004567 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004568 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004569 }
4570
4571 return NETDEV_TX_OK;
4572}
4573
/* Called with rtnl_lock */
/* net_device stop: wait out any in-flight reset task, quiesce the
 * netif and timer, tell the firmware why we are going down (affects
 * Wake-on-LAN behavior), release IRQ/MSI, free rings, and drop to D3.
 * Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload code based on WoL capability/config. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4609
/* Fold the hi/lo halves of a 64-bit hardware counter into an
 * unsigned long.  GET_NET_STATS picks the 64-bit form only when
 * unsigned long is 64 bits wide; 32-bit hosts keep just the low half
 * (the << 32 would be undefined there).
 */
#define GET_NET_STATS64(ctr)				\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4622
/* net_device get_stats: translate the chip's DMA'd statistics block
 * into struct net_device_stats.  Returns the (possibly stale) cached
 * net_stats if the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is unreliable on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames dropped by firmware (stat_FwRxDrop is refreshed
	 * from the chip in bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4698
4699/* All ethtool functions called with rtnl_lock */
4700
4701static int
4702bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4703{
Michael Chan972ec0d2006-01-23 16:12:43 -08004704 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004705
4706 cmd->supported = SUPPORTED_Autoneg;
4707 if (bp->phy_flags & PHY_SERDES_FLAG) {
4708 cmd->supported |= SUPPORTED_1000baseT_Full |
4709 SUPPORTED_FIBRE;
4710
4711 cmd->port = PORT_FIBRE;
4712 }
4713 else {
4714 cmd->supported |= SUPPORTED_10baseT_Half |
4715 SUPPORTED_10baseT_Full |
4716 SUPPORTED_100baseT_Half |
4717 SUPPORTED_100baseT_Full |
4718 SUPPORTED_1000baseT_Full |
4719 SUPPORTED_TP;
4720
4721 cmd->port = PORT_TP;
4722 }
4723
4724 cmd->advertising = bp->advertising;
4725
4726 if (bp->autoneg & AUTONEG_SPEED) {
4727 cmd->autoneg = AUTONEG_ENABLE;
4728 }
4729 else {
4730 cmd->autoneg = AUTONEG_DISABLE;
4731 }
4732
4733 if (netif_carrier_ok(dev)) {
4734 cmd->speed = bp->line_speed;
4735 cmd->duplex = bp->duplex;
4736 }
4737 else {
4738 cmd->speed = -1;
4739 cmd->duplex = -1;
4740 }
4741
4742 cmd->transceiver = XCVR_INTERNAL;
4743 cmd->phy_address = bp->phy_addr;
4744
4745 return 0;
4746}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004747
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * combination against the PHY type (SerDes vs copper), then commit the
 * new settings and renegotiate via bnx2_setup_phy().
 * Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on copies so nothing is committed until validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes do not exist on a SerDes link. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half-duplex is never supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY can do. */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: SerDes accepts only 1000/full, or 2500/full
		 * on 2.5G-capable PHYs; copper rejects forced 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4823
4824static void
4825bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4826{
Michael Chan972ec0d2006-01-23 16:12:43 -08004827 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004828
4829 strcpy(info->driver, DRV_MODULE_NAME);
4830 strcpy(info->version, DRV_MODULE_VERSION);
4831 strcpy(info->bus_info, pci_name(bp->pdev));
4832 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4833 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4834 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004835 info->fw_version[1] = info->fw_version[3] = '.';
4836 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004837}
4838
/* Size of the ethtool register dump buffer. */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: fixed-size dump, see bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4846
4847static void
4848bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4849{
4850 u32 *p = _p, i, offset;
4851 u8 *orig_p = _p;
4852 struct bnx2 *bp = netdev_priv(dev);
4853 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4854 0x0800, 0x0880, 0x0c00, 0x0c10,
4855 0x0c30, 0x0d08, 0x1000, 0x101c,
4856 0x1040, 0x1048, 0x1080, 0x10a4,
4857 0x1400, 0x1490, 0x1498, 0x14f0,
4858 0x1500, 0x155c, 0x1580, 0x15dc,
4859 0x1600, 0x1658, 0x1680, 0x16d8,
4860 0x1800, 0x1820, 0x1840, 0x1854,
4861 0x1880, 0x1894, 0x1900, 0x1984,
4862 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4863 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4864 0x2000, 0x2030, 0x23c0, 0x2400,
4865 0x2800, 0x2820, 0x2830, 0x2850,
4866 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4867 0x3c00, 0x3c94, 0x4000, 0x4010,
4868 0x4080, 0x4090, 0x43c0, 0x4458,
4869 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4870 0x4fc0, 0x5010, 0x53c0, 0x5444,
4871 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4872 0x5fc0, 0x6000, 0x6400, 0x6428,
4873 0x6800, 0x6848, 0x684c, 0x6860,
4874 0x6888, 0x6910, 0x8000 };
4875
4876 regs->version = 0;
4877
4878 memset(p, 0, BNX2_REGDUMP_LEN);
4879
4880 if (!netif_running(bp->dev))
4881 return;
4882
4883 i = 0;
4884 offset = reg_boundaries[0];
4885 p += offset;
4886 while (offset < BNX2_REGDUMP_LEN) {
4887 *p++ = REG_RD(bp, offset);
4888 offset += 4;
4889 if (offset == reg_boundaries[i + 1]) {
4890 offset = reg_boundaries[i + 2];
4891 p = (u32 *) (orig_p + offset);
4892 i += 2;
4893 }
4894 }
4895}
4896
Michael Chanb6016b72005-05-26 13:03:09 -07004897static void
4898bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4899{
Michael Chan972ec0d2006-01-23 16:12:43 -08004900 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004901
4902 if (bp->flags & NO_WOL_FLAG) {
4903 wol->supported = 0;
4904 wol->wolopts = 0;
4905 }
4906 else {
4907 wol->supported = WAKE_MAGIC;
4908 if (bp->wol)
4909 wol->wolopts = WAKE_MAGIC;
4910 else
4911 wol->wolopts = 0;
4912 }
4913 memset(&wol->sopass, 0, sizeof(wol->sopass));
4914}
4915
4916static int
4917bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4918{
Michael Chan972ec0d2006-01-23 16:12:43 -08004919 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004920
4921 if (wol->wolopts & ~WAKE_MAGIC)
4922 return -EINVAL;
4923
4924 if (wol->wolopts & WAKE_MAGIC) {
4925 if (bp->flags & NO_WOL_FLAG)
4926 return -EINVAL;
4927
4928 bp->wol = 1;
4929 }
4930 else {
4931 bp->wol = 0;
4932 }
4933 return 0;
4934}
4935
/* ethtool_ops->nway_reset: restart autonegotiation.
 * Returns -EINVAL if autoneg of speed is not currently enabled.
 * Called with rtnl held; takes phy_lock around all PHY accesses.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; loopback keeps the link
		 * down in the meantime.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handling in the timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4970
4971static int
4972bnx2_get_eeprom_len(struct net_device *dev)
4973{
Michael Chan972ec0d2006-01-23 16:12:43 -08004974 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004975
Michael Chan1122db72006-01-23 16:11:42 -08004976 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004977 return 0;
4978
Michael Chan1122db72006-01-23 16:11:42 -08004979 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004980}
4981
4982static int
4983bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4984 u8 *eebuf)
4985{
Michael Chan972ec0d2006-01-23 16:12:43 -08004986 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004987 int rc;
4988
John W. Linville1064e942005-11-10 12:58:24 -08004989 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004990
4991 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4992
4993 return rc;
4994}
4995
4996static int
4997bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4998 u8 *eebuf)
4999{
Michael Chan972ec0d2006-01-23 16:12:43 -08005000 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005001 int rc;
5002
John W. Linville1064e942005-11-10 12:58:24 -08005003 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005004
5005 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5006
5007 return rc;
5008}
5009
5010static int
5011bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5012{
Michael Chan972ec0d2006-01-23 16:12:43 -08005013 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005014
5015 memset(coal, 0, sizeof(struct ethtool_coalesce));
5016
5017 coal->rx_coalesce_usecs = bp->rx_ticks;
5018 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5019 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5020 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5021
5022 coal->tx_coalesce_usecs = bp->tx_ticks;
5023 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5024 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5025 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5026
5027 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5028
5029 return 0;
5030}
5031
5032static int
5033bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5034{
Michael Chan972ec0d2006-01-23 16:12:43 -08005035 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005036
5037 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5038 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5039
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005040 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005041 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5042
5043 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5044 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5045
5046 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5047 if (bp->rx_quick_cons_trip_int > 0xff)
5048 bp->rx_quick_cons_trip_int = 0xff;
5049
5050 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5051 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5052
5053 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5054 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5055
5056 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5057 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5058
5059 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5060 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5061 0xff;
5062
5063 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5064 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5065 bp->stats_ticks &= 0xffff00;
5066
5067 if (netif_running(bp->dev)) {
5068 bnx2_netif_stop(bp);
5069 bnx2_init_nic(bp);
5070 bnx2_netif_start(bp);
5071 }
5072
5073 return 0;
5074}
5075
5076static void
5077bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5078{
Michael Chan972ec0d2006-01-23 16:12:43 -08005079 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005080
Michael Chan13daffa2006-03-20 17:49:20 -08005081 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005082 ering->rx_mini_max_pending = 0;
5083 ering->rx_jumbo_max_pending = 0;
5084
5085 ering->rx_pending = bp->rx_ring_size;
5086 ering->rx_mini_pending = 0;
5087 ering->rx_jumbo_pending = 0;
5088
5089 ering->tx_max_pending = MAX_TX_DESC_CNT;
5090 ering->tx_pending = bp->tx_ring_size;
5091}
5092
/* ethtool_ops->set_ringparam: resize the rx/tx rings.  The tx ring must
 * hold at least MAX_SKB_FRAGS + 1 descriptors so a maximally-fragmented
 * skb always fits.  On a running device the chip is stopped, ring
 * memory is freed and reallocated at the new size, and the chip is
 * restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its rings freed; only the error code is
		 * reported to the caller.  Confirm this is the intended
		 * recovery policy.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5126
5127static void
5128bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5129{
Michael Chan972ec0d2006-01-23 16:12:43 -08005130 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005131
5132 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5133 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5134 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5135}
5136
5137static int
5138bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5139{
Michael Chan972ec0d2006-01-23 16:12:43 -08005140 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005141
5142 bp->req_flow_ctrl = 0;
5143 if (epause->rx_pause)
5144 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5145 if (epause->tx_pause)
5146 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5147
5148 if (epause->autoneg) {
5149 bp->autoneg |= AUTONEG_FLOW_CTRL;
5150 }
5151 else {
5152 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5153 }
5154
Michael Chanc770a652005-08-25 15:38:39 -07005155 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005156
5157 bnx2_setup_phy(bp);
5158
Michael Chanc770a652005-08-25 15:38:39 -07005159 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005160
5161 return 0;
5162}
5163
5164static u32
5165bnx2_get_rx_csum(struct net_device *dev)
5166{
Michael Chan972ec0d2006-01-23 16:12:43 -08005167 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005168
5169 return bp->rx_csum;
5170}
5171
5172static int
5173bnx2_set_rx_csum(struct net_device *dev, u32 data)
5174{
Michael Chan972ec0d2006-01-23 16:12:43 -08005175 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005176
5177 bp->rx_csum = data;
5178 return 0;
5179}
5180
Michael Chanb11d6212006-06-29 12:31:21 -07005181static int
5182bnx2_set_tso(struct net_device *dev, u32 data)
5183{
5184 if (data)
5185 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5186 else
5187 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5188 return 0;
5189}
5190
/* Number of ethtool statistics exported.  The three tables below
 * (names, hardware offsets, counter widths) are indexed in lockstep and
 * must all have exactly BNX2_NUM_STATS entries in the same order.
 */
#define BNX2_NUM_STATS 46

/* ethtool stat names, reported by bnx2_get_strings(ETH_SS_STATS). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5243
/* Convert a statistics_block field into a u32-word index into the
 * hardware stats block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-stat word offsets into the hardware statistics block; same index
 * order as bnx2_stats_str_arr.  For 8-byte counters the offset points
 * at the _hi word and bnx2_get_ethtool_stats() reads the following
 * word as the low half.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5294
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-stat counter widths in bytes (0 = stat skipped) for 5706-class
 * chips; same index order as bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5305
/* Per-stat counter widths for 5708 (and newer 5706 revisions); unlike
 * the 5706 table, stat_Dot3StatsCarrierSenseErrors is not skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5313
/* Number of self-tests run by bnx2_self_test(); the name table below
 * must match its result slots in order.
 */
#define BNX2_NUM_TESTS 6

/* Self-test names, reported by bnx2_get_strings(ETH_SS_TEST). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5326
5327static int
5328bnx2_self_test_count(struct net_device *dev)
5329{
5330 return BNX2_NUM_TESTS;
5331}
5332
/* ethtool_ops->self_test: run the diagnostics named in
 * bnx2_tests_str_arr.  buf[i] is nonzero when test i failed, and
 * ETH_TEST_FL_FAILED is set on any failure.  The offline tests
 * (register/memory/loopback) stop and reset the chip first; the online
 * tests (nvram/interrupt/link) run in either case.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the chip and put it in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback test's own failure code. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or just reset if the
		 * interface is administratively down).
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5388
5389static void
5390bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5391{
5392 switch (stringset) {
5393 case ETH_SS_STATS:
5394 memcpy(buf, bnx2_stats_str_arr,
5395 sizeof(bnx2_stats_str_arr));
5396 break;
5397 case ETH_SS_TEST:
5398 memcpy(buf, bnx2_tests_str_arr,
5399 sizeof(bnx2_tests_str_arr));
5400 break;
5401 }
5402}
5403
5404static int
5405bnx2_get_stats_count(struct net_device *dev)
5406{
5407 return BNX2_NUM_STATS;
5408}
5409
/* ethtool_ops->get_ethtool_stats: copy the hardware statistics block
 * into buf, one u64 per stat.  A per-chip-revision width table decides
 * whether each entry is a 4-byte counter, an 8-byte (hi/lo word pair)
 * counter, or skipped entirely due to errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block is only allocated while the device is up. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions have errata that make some counters invalid;
	 * pick the width/skip table for this silicon.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: offset points at the high word, the
		 * low word immediately follows it.
		 */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5450
5451static int
5452bnx2_phys_id(struct net_device *dev, u32 data)
5453{
Michael Chan972ec0d2006-01-23 16:12:43 -08005454 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005455 int i;
5456 u32 save;
5457
5458 if (data == 0)
5459 data = 2;
5460
5461 save = REG_RD(bp, BNX2_MISC_CFG);
5462 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5463
5464 for (i = 0; i < (data * 2); i++) {
5465 if ((i % 2) == 0) {
5466 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5467 }
5468 else {
5469 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5470 BNX2_EMAC_LED_1000MB_OVERRIDE |
5471 BNX2_EMAC_LED_100MB_OVERRIDE |
5472 BNX2_EMAC_LED_10MB_OVERRIDE |
5473 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5474 BNX2_EMAC_LED_TRAFFIC);
5475 }
5476 msleep_interruptible(500);
5477 if (signal_pending(current))
5478 break;
5479 }
5480 REG_WR(bp, BNX2_EMAC_LED, 0);
5481 REG_WR(bp, BNX2_MISC_CFG, save);
5482 return 0;
5483}
5484
/* ethtool entry points for the bnx2 driver.  TSO handlers are compiled
 * in only when the kernel provides NETIF_F_TSO (BCM_TSO).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5522
/* Called with rtnl_lock */
/* ndo ioctl handler: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  PHY reads/writes are serialized with phy_lock; writes
 * require CAP_NET_ADMIN.  Anything else returns -EOPNOTSUPP.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5564
5565/* Called with rtnl_lock */
5566static int
5567bnx2_change_mac_addr(struct net_device *dev, void *p)
5568{
5569 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005570 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005571
Michael Chan73eef4c2005-08-25 15:39:15 -07005572 if (!is_valid_ether_addr(addr->sa_data))
5573 return -EINVAL;
5574
Michael Chanb6016b72005-05-26 13:03:09 -07005575 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5576 if (netif_running(dev))
5577 bnx2_set_mac_addr(bp);
5578
5579 return 0;
5580}
5581
5582/* Called with rtnl_lock */
5583static int
5584bnx2_change_mtu(struct net_device *dev, int new_mtu)
5585{
Michael Chan972ec0d2006-01-23 16:12:43 -08005586 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005587
5588 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5589 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5590 return -EINVAL;
5591
5592 dev->mtu = new_mtu;
5593 if (netif_running(dev)) {
5594 bnx2_netif_stop(bp);
5595
5596 bnx2_init_nic(bp);
5597
5598 bnx2_netif_start(bp);
5599 }
5600 return 0;
5601}
5602
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the device with interrupts masked so netpoll
 * (e.g. netconsole) can make progress in atomic contexts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5614
5615static int __devinit
5616bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5617{
5618 struct bnx2 *bp;
5619 unsigned long mem_len;
5620 int rc;
5621 u32 reg;
5622
5623 SET_MODULE_OWNER(dev);
5624 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005625 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005626
5627 bp->flags = 0;
5628 bp->phy_flags = 0;
5629
5630 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5631 rc = pci_enable_device(pdev);
5632 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005633 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005634 goto err_out;
5635 }
5636
5637 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005638 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005639 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005640 rc = -ENODEV;
5641 goto err_out_disable;
5642 }
5643
5644 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5645 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005646 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005647 goto err_out_disable;
5648 }
5649
5650 pci_set_master(pdev);
5651
5652 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5653 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005654 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005655 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005656 rc = -EIO;
5657 goto err_out_release;
5658 }
5659
5660 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5661 if (bp->pcix_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005662 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005663 rc = -EIO;
5664 goto err_out_release;
5665 }
5666
5667 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5668 bp->flags |= USING_DAC_FLAG;
5669 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005670 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005671 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005672 rc = -EIO;
5673 goto err_out_release;
5674 }
5675 }
5676 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005677 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005678 rc = -EIO;
5679 goto err_out_release;
5680 }
5681
5682 bp->dev = dev;
5683 bp->pdev = pdev;
5684
5685 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005686 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5687
5688 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5689 mem_len = MB_GET_CID_ADDR(17);
5690 dev->mem_end = dev->mem_start + mem_len;
5691 dev->irq = pdev->irq;
5692
5693 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5694
5695 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005696 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005697 rc = -ENOMEM;
5698 goto err_out_release;
5699 }
5700
5701 /* Configure byte swap and enable write to the reg_window registers.
5702 * Rely on CPU to do target byte swapping on big endian systems
5703 * The chip's target access swapping will not swap all accesses
5704 */
5705 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5706 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5707 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5708
Pavel Machek829ca9a2005-09-03 15:56:56 -07005709 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005710
5711 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5712
Michael Chanb6016b72005-05-26 13:03:09 -07005713 /* Get bus information. */
5714 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5715 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5716 u32 clkreg;
5717
5718 bp->flags |= PCIX_FLAG;
5719
5720 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005721
Michael Chanb6016b72005-05-26 13:03:09 -07005722 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5723 switch (clkreg) {
5724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5725 bp->bus_speed_mhz = 133;
5726 break;
5727
5728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5729 bp->bus_speed_mhz = 100;
5730 break;
5731
5732 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5733 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5734 bp->bus_speed_mhz = 66;
5735 break;
5736
5737 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5738 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5739 bp->bus_speed_mhz = 50;
5740 break;
5741
5742 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5743 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5744 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5745 bp->bus_speed_mhz = 33;
5746 break;
5747 }
5748 }
5749 else {
5750 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5751 bp->bus_speed_mhz = 66;
5752 else
5753 bp->bus_speed_mhz = 33;
5754 }
5755
5756 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5757 bp->flags |= PCI_32BIT_FLAG;
5758
5759 /* 5706A0 may falsely detect SERR and PERR. */
5760 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5761 reg = REG_RD(bp, PCI_COMMAND);
5762 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5763 REG_WR(bp, PCI_COMMAND, reg);
5764 }
5765 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5766 !(bp->flags & PCIX_FLAG)) {
5767
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005768 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005769 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005770 goto err_out_unmap;
5771 }
5772
5773 bnx2_init_nvram(bp);
5774
Michael Chane3648b32005-11-04 08:51:21 -08005775 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5776
5777 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5778 BNX2_SHM_HDR_SIGNATURE_SIG)
5779 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5780 else
5781 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5782
Michael Chanb6016b72005-05-26 13:03:09 -07005783 /* Get the permanent MAC address. First we need to make sure the
5784 * firmware is actually running.
5785 */
Michael Chane3648b32005-11-04 08:51:21 -08005786 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005787
5788 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5789 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005790 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005791 rc = -ENODEV;
5792 goto err_out_unmap;
5793 }
5794
Michael Chane3648b32005-11-04 08:51:21 -08005795 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005796
Michael Chane3648b32005-11-04 08:51:21 -08005797 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005798 bp->mac_addr[0] = (u8) (reg >> 8);
5799 bp->mac_addr[1] = (u8) reg;
5800
Michael Chane3648b32005-11-04 08:51:21 -08005801 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005802 bp->mac_addr[2] = (u8) (reg >> 24);
5803 bp->mac_addr[3] = (u8) (reg >> 16);
5804 bp->mac_addr[4] = (u8) (reg >> 8);
5805 bp->mac_addr[5] = (u8) reg;
5806
5807 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005808 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005809
5810 bp->rx_csum = 1;
5811
5812 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5813
5814 bp->tx_quick_cons_trip_int = 20;
5815 bp->tx_quick_cons_trip = 20;
5816 bp->tx_ticks_int = 80;
5817 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005818
Michael Chanb6016b72005-05-26 13:03:09 -07005819 bp->rx_quick_cons_trip_int = 6;
5820 bp->rx_quick_cons_trip = 6;
5821 bp->rx_ticks_int = 18;
5822 bp->rx_ticks = 18;
5823
5824 bp->stats_ticks = 1000000 & 0xffff00;
5825
5826 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005827 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005828
Michael Chan5b0c76a2005-11-04 08:45:49 -08005829 bp->phy_addr = 1;
5830
Michael Chanb6016b72005-05-26 13:03:09 -07005831 /* Disable WOL support if we are running on a SERDES chip. */
5832 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5833 bp->phy_flags |= PHY_SERDES_FLAG;
5834 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005835 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5836 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005837 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005838 BNX2_SHARED_HW_CFG_CONFIG);
5839 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5840 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5841 }
Michael Chanb6016b72005-05-26 13:03:09 -07005842 }
5843
Michael Chan16088272006-06-12 22:16:43 -07005844 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5845 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5846 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005847 bp->flags |= NO_WOL_FLAG;
5848
Michael Chanb6016b72005-05-26 13:03:09 -07005849 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5850 bp->tx_quick_cons_trip_int =
5851 bp->tx_quick_cons_trip;
5852 bp->tx_ticks_int = bp->tx_ticks;
5853 bp->rx_quick_cons_trip_int =
5854 bp->rx_quick_cons_trip;
5855 bp->rx_ticks_int = bp->rx_ticks;
5856 bp->comp_prod_trip_int = bp->comp_prod_trip;
5857 bp->com_ticks_int = bp->com_ticks;
5858 bp->cmd_ticks_int = bp->cmd_ticks;
5859 }
5860
Michael Chanf9317a42006-09-29 17:06:23 -07005861 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5862 *
5863 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5864 * with byte enables disabled on the unused 32-bit word. This is legal
5865 * but causes problems on the AMD 8132 which will eventually stop
5866 * responding after a while.
5867 *
5868 * AMD believes this incompatibility is unique to the 5706, and
5869 * prefers to locally disable MSI rather than globally disabling it
5870 * using pci_msi_quirk.
5871 */
5872 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5873 struct pci_dev *amd_8132 = NULL;
5874
5875 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5876 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5877 amd_8132))) {
5878 u8 rev;
5879
5880 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5881 if (rev >= 0x10 && rev <= 0x13) {
5882 disable_msi = 1;
5883 pci_dev_put(amd_8132);
5884 break;
5885 }
5886 }
5887 }
5888
Michael Chanb6016b72005-05-26 13:03:09 -07005889 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5890 bp->req_line_speed = 0;
5891 if (bp->phy_flags & PHY_SERDES_FLAG) {
5892 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005893
Michael Chane3648b32005-11-04 08:51:21 -08005894 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005895 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5896 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5897 bp->autoneg = 0;
5898 bp->req_line_speed = bp->line_speed = SPEED_1000;
5899 bp->req_duplex = DUPLEX_FULL;
5900 }
Michael Chanb6016b72005-05-26 13:03:09 -07005901 }
5902 else {
5903 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5904 }
5905
5906 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5907
Michael Chancd339a02005-08-25 15:35:24 -07005908 init_timer(&bp->timer);
5909 bp->timer.expires = RUN_AT(bp->timer_interval);
5910 bp->timer.data = (unsigned long) bp;
5911 bp->timer.function = bnx2_timer;
5912
Michael Chanb6016b72005-05-26 13:03:09 -07005913 return 0;
5914
5915err_out_unmap:
5916 if (bp->regview) {
5917 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005918 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005919 }
5920
5921err_out_release:
5922 pci_release_regions(pdev);
5923
5924err_out_disable:
5925 pci_disable_device(pdev);
5926 pci_set_drvdata(pdev, NULL);
5927
5928err_out:
5929 return rc;
5930}
5931
5932static int __devinit
5933bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5934{
5935 static int version_printed = 0;
5936 struct net_device *dev = NULL;
5937 struct bnx2 *bp;
5938 int rc, i;
5939
5940 if (version_printed++ == 0)
5941 printk(KERN_INFO "%s", version);
5942
5943 /* dev zeroed in init_etherdev */
5944 dev = alloc_etherdev(sizeof(*bp));
5945
5946 if (!dev)
5947 return -ENOMEM;
5948
5949 rc = bnx2_init_board(pdev, dev);
5950 if (rc < 0) {
5951 free_netdev(dev);
5952 return rc;
5953 }
5954
5955 dev->open = bnx2_open;
5956 dev->hard_start_xmit = bnx2_start_xmit;
5957 dev->stop = bnx2_close;
5958 dev->get_stats = bnx2_get_stats;
5959 dev->set_multicast_list = bnx2_set_rx_mode;
5960 dev->do_ioctl = bnx2_ioctl;
5961 dev->set_mac_address = bnx2_change_mac_addr;
5962 dev->change_mtu = bnx2_change_mtu;
5963 dev->tx_timeout = bnx2_tx_timeout;
5964 dev->watchdog_timeo = TX_TIMEOUT;
5965#ifdef BCM_VLAN
5966 dev->vlan_rx_register = bnx2_vlan_rx_register;
5967 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5968#endif
5969 dev->poll = bnx2_poll;
5970 dev->ethtool_ops = &bnx2_ethtool_ops;
5971 dev->weight = 64;
5972
Michael Chan972ec0d2006-01-23 16:12:43 -08005973 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005974
5975#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5976 dev->poll_controller = poll_bnx2;
5977#endif
5978
5979 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005980 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005981 if (bp->regview)
5982 iounmap(bp->regview);
5983 pci_release_regions(pdev);
5984 pci_disable_device(pdev);
5985 pci_set_drvdata(pdev, NULL);
5986 free_netdev(dev);
5987 return rc;
5988 }
5989
5990 pci_set_drvdata(pdev, dev);
5991
5992 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005993 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005994 bp->name = board_info[ent->driver_data].name,
5995 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5996 "IRQ %d, ",
5997 dev->name,
5998 bp->name,
5999 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6000 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6001 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6002 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6003 bp->bus_speed_mhz,
6004 dev->base_addr,
6005 bp->pdev->irq);
6006
6007 printk("node addr ");
6008 for (i = 0; i < 6; i++)
6009 printk("%2.2x", dev->dev_addr[i]);
6010 printk("\n");
6011
6012 dev->features |= NETIF_F_SG;
6013 if (bp->flags & USING_DAC_FLAG)
6014 dev->features |= NETIF_F_HIGHDMA;
6015 dev->features |= NETIF_F_IP_CSUM;
6016#ifdef BCM_VLAN
6017 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6018#endif
6019#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006020 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006021#endif
6022
6023 netif_carrier_off(bp->dev);
6024
6025 return 0;
6026}
6027
/* PCI remove callback: tear down one device in reverse order of probe.
 *
 * flush_scheduled_work() waits for any driver work still queued on the
 * shared workqueue to finish before the netdev disappears; iounmap and
 * the PCI releases must not run until after unregister_netdev().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	/* regview may be NULL if probe failed partway; guard the unmap. */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6046
/* PCI suspend callback: quiesce the interface, tell the firmware why
 * we are going down (WOL-armed or not), and drop to the target PCI
 * power state.  A device that is not up needs no work.
 *
 * Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Let pending driver work finish before stopping the NIC. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code: link-down unload when WOL is
	 * impossible on this chip, otherwise suspend with or without
	 * Wake-on-LAN depending on the user setting.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6072
/* PCI resume callback: restore full power, re-attach the netdev, and
 * reinitialize the NIC from scratch (suspend reset the chip and freed
 * all rings).  A device that was not up needs no work.
 *
 * Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6088
/* PCI driver glue: binds the probe/remove and power-management
 * callbacks above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6097
6098static int __init bnx2_init(void)
6099{
Jeff Garzik29917622006-08-19 17:48:59 -04006100 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006101}
6102
/* Module exit point: unregister from the PCI core, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6107
/* Register the module load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6110
6111
6112