blob: b60e45d1ce3d8226d2174cac22ec08aa121ea833 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
56
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
Michael Chanf9317a42006-09-29 17:06:23 -070059#define DRV_MODULE_VERSION "1.4.45"
60#define DRV_MODULE_RELDATE "September 29, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070061
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
Randy Dunlape19360f2006-04-10 23:22:06 -070067static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070068 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080071MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070072MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
/* Module parameter: non-zero disables MSI so the driver falls back to
 * legacy (INTx) interrupts. Loaded via "modprobe bnx2 disable_msi=1". */
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
Michael Chan5b0c76a2005-11-04 08:45:49 -080086 BCM5708,
87 BCM5708S,
Michael Chanb6016b72005-05-26 13:03:09 -070088} board_t;
89
90/* indexed by board_t, above */
/* Human-readable adapter names, indexed by board_t (order must match). */
Arjan van de Venf71e1302006-03-03 21:33:57 -050091static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -070092	char *name;
93} board_info[] __devinitdata = {
94	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
95	{ "HP NC370T Multifunction Gigabit Server Adapter" },
96	{ "HP NC370i Multifunction Gigabit Server Adapter" },
97	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98	{ "HP NC370F Multifunction Gigabit Server Adapter" },
Michael Chan5b0c76a2005-11-04 08:45:49 -080099	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
100	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
Michael Chanb6016b72005-05-26 13:03:09 -0700101	};
102
/* PCI IDs claimed by this driver. HP subsystem IDs (0x3101/0x3106/0x3102)
 * are matched first so OEM NC370x boards get their own board_t; the
 * PCI_ANY_ID entries catch the generic Broadcom parts. The driver_data
 * field carries the board_t index. */
103static struct pci_device_id bnx2_pci_tbl[] = {
104	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800110	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
Michael Chanb6016b72005-05-26 13:03:09 -0700112	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
Michael Chan5b0c76a2005-11-04 08:45:49 -0800116	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
Michael Chanb6016b72005-05-26 13:03:09 -0700118	{ 0, }
119};
120
/* NVRAM device descriptors (struct flash_spec, see bnx2.h). Each entry
 * holds magic/config/write/erase register values followed by buffered-flag,
 * page bits/size, byte address mask, total size, and a name. NOTE(review):
 * the first field appears to encode the hardware strapping pattern used to
 * select the matching entry at probe time — confirm against the NVRAM
 * probe code and bnx2.h. */
121static struct flash_spec flash_table[] =
122{
123	/* Slow EEPROM */
Michael Chan37137702005-11-04 08:49:17 -0800124	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
Michael Chanb6016b72005-05-26 13:03:09 -0700125	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
127	 "EEPROM - slow"},
Michael Chan37137702005-11-04 08:49:17 -0800128	/* Expansion entry 0001 */
129	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700130	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
Michael Chan37137702005-11-04 08:49:17 -0800131	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
132	 "Entry 0001"},
Michael Chanb6016b72005-05-26 13:03:09 -0700133	/* Saifun SA25F010 (non-buffered flash) */
134	/* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800135	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700136	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138	 "Non-buffered flash (128kB)"},
139	/* Saifun SA25F020 (non-buffered flash) */
140	/* strap, cfg1, & write1 need updates */
Michael Chan37137702005-11-04 08:49:17 -0800141	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
Michael Chanb6016b72005-05-26 13:03:09 -0700142	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144	 "Non-buffered flash (256kB)"},
Michael Chan37137702005-11-04 08:49:17 -0800145	/* Expansion entry 0100 */
146	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149	 "Entry 0100"},
150	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400151	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
Michael Chan37137702005-11-04 08:49:17 -0800152	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
156	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160	/* Saifun SA25F005 (non-buffered flash) */
161	/* strap, cfg1, & write1 need updates */
162	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165	 "Non-buffered flash (64kB)"},
166	/* Fast EEPROM */
167	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170	 "EEPROM - fast"},
171	/* Expansion entry 1001 */
172	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175	 "Entry 1001"},
176	/* Expansion entry 1010 */
177	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180	 "Entry 1010"},
181	/* ATMEL AT45DB011B (buffered flash) */
182	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185	 "Buffered flash (128kB)"},
186	/* Expansion entry 1100 */
187	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190	 "Entry 1100"},
191	/* Expansion entry 1101 */
192	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195	 "Entry 1101"},
196	/* Ateml Expansion entry 1110 */
197	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200	 "Entry 1110 (Atmel)"},
201	/* ATMEL AT45DB021B (buffered flash) */
202	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205	 "Buffered flash (256kB)"},
Michael Chanb6016b72005-05-26 13:03:09 -0700206};
207
208MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Return the number of free tx descriptors. The producer/consumer indices
 * are masked to ring positions before subtracting; the wrap correction keeps
 * one descriptor unusable so full and empty are distinguishable.
 * NOTE(review): smp_mb() presumably pairs with index updates in the tx/
 * completion paths — confirm against those call sites. */
Michael Chane89bbf12005-08-25 15:36:58 -0700210static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211{
Michael Chan2f8af122006-08-15 01:39:10 -0700212	u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700213
Michael Chan2f8af122006-08-15 01:39:10 -0700214	smp_mb();
215	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
Michael Chane89bbf12005-08-25 15:36:58 -0700216	if (diff > MAX_TX_DESC_CNT)
217		diff = (diff & MAX_TX_DESC_CNT) - 1;
218	return (bp->tx_ring_size - diff);
219}
220
/* Indirect register read: select the target offset through the PCICFG
 * register-window address register, then read the windowed data register.
 * NOTE(review): caller is presumably expected to hold any needed lock —
 * no serialization is done here. */
Michael Chanb6016b72005-05-26 13:03:09 -0700221static u32
222bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
223{
224	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
226}
227
/* Indirect register write: counterpart of bnx2_reg_rd_ind() — select the
 * offset via the PCICFG window address register, then write the value. */
228static void
229bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
230{
231	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
233}
234
/* Write one word of on-chip context memory at (cid_addr + offset) through
 * the CTX data address/data register pair. */
235static void
236bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237{
238	offset += cid_addr;
239	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240	REG_WR(bp, BNX2_CTX_DATA, val);
241}
242
/* Read PHY register 'reg' over the EMAC MDIO interface.
 * If hardware auto-polling is active it is temporarily turned off (and
 * restored at the end) so the manual MDIO transaction does not collide
 * with it. The read is started via EMAC_MDIO_COMM and polled for
 * completion (up to 50 x 10us). Returns 0 with the result in *val, or
 * -EBUSY on timeout with *val set to 0. */
243static int
244bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
245{
246	u32 val1;
247	int i, ret;
248
249	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
252
253		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
255
256		udelay(40);
257	}
258
/* Compose the MDIO read command: PHY address, register, read opcode, go. */
259	val1 = (bp->phy_addr << 21) | (reg << 16) |
260		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261		BNX2_EMAC_MDIO_COMM_START_BUSY;
262	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
263
264	for (i = 0; i < 50; i++) {
265		udelay(10);
266
267		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
269			udelay(5);
270
271			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
273
274			break;
275		}
276	}
277
278	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
279		*val = 0x0;
280		ret = -EBUSY;
281	}
282	else {
283		*val = val1;
284		ret = 0;
285	}
286
/* Restore auto-polling if we disabled it above. */
287	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290
291		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
293
294		udelay(40);
295	}
296
297	return ret;
298}
299
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction, completion is polled (up to 50 x 10us), and auto-polling
 * is restored. Returns 0 on success or -EBUSY if the transaction never
 * completed. */
300static int
301bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
302{
303	u32 val1;
304	int i, ret;
305
306	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
309
310		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
312
313		udelay(40);
314	}
315
316	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400320
Michael Chanb6016b72005-05-26 13:03:09 -0700321	for (i = 0; i < 50; i++) {
322		udelay(10);
323
324		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
326			udelay(5);
327			break;
328		}
329	}
330
331	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
332		ret = -EBUSY;
333	else
334		ret = 0;
335
336	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
339
340		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
342
343		udelay(40);
344	}
345
346	return ret;
347}
348
/* Mask chip interrupts via the INT_ACK command register. NOTE(review):
 * the read-back presumably flushes the posted PCI write — standard idiom,
 * confirm. */
349static void
350bnx2_disable_int(struct bnx2 *bp)
351{
352	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
355}
356
/* Re-enable interrupts: first ack the last seen status index with the
 * mask bit still set, then write again without the mask bit to unmask.
 * The final HC_COMMAND write sets COAL_NOW — NOTE(review): presumably to
 * force an immediate coalescing event so no status update is missed;
 * confirm against the host-coalescing docs. */
357static void
358bnx2_enable_int(struct bnx2 *bp)
359{
Michael Chanb6016b72005-05-26 13:03:09 -0700360	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -0800361	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
363
364	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chanb6016b72005-05-26 13:03:09 -0700365	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
366
Michael Chanbf5295b2006-03-23 01:11:56 -0800367	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -0700368}
369
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() (which decrements it)
 * will not re-enable until the matching start call. */
370static void
371bnx2_disable_int_sync(struct bnx2 *bp)
372{
373	atomic_inc(&bp->intr_sem);
374	bnx2_disable_int(bp);
375	synchronize_irq(bp->pdev->irq);
376}
377
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue. trans_start is refreshed so the watchdog
 * does not see a stale timestamp and fire a tx timeout while stopped. */
378static void
379bnx2_netif_stop(struct bnx2 *bp)
380{
381	bnx2_disable_int_sync(bp);
382	if (netif_running(bp->dev)) {
383		netif_poll_disable(bp->dev);
384		netif_tx_disable(bp->dev);
385		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
386	}
387}
388
/* Undo one bnx2_netif_stop(). Only when intr_sem drops to zero (i.e. the
 * outermost start matching nested stops) are the tx queue, NAPI polling
 * and interrupts re-enabled. */
389static void
390bnx2_netif_start(struct bnx2 *bp)
391{
392	if (atomic_dec_and_test(&bp->intr_sem)) {
393		if (netif_running(bp->dev)) {
394			netif_wake_queue(bp->dev);
395			netif_poll_enable(bp->dev);
396			bnx2_enable_int(bp);
397		}
398	}
399}
400
/* Release all ring/status memory allocated by bnx2_alloc_mem(). Safe to
 * call on a partially-allocated state (used as the error-unwind path):
 * every pointer is checked or NULL-safe and cleared after freeing.
 * status_blk and stats_blk share one DMA allocation, so only status_blk
 * is freed and stats_blk is just cleared. */
401static void
402bnx2_free_mem(struct bnx2 *bp)
403{
Michael Chan13daffa2006-03-20 17:49:20 -0800404	int i;
405
Michael Chanb6016b72005-05-26 13:03:09 -0700406	if (bp->status_blk) {
Michael Chan0f31f992006-03-23 01:12:38 -0800407		pci_free_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700408				    bp->status_blk, bp->status_blk_mapping);
409		bp->status_blk = NULL;
Michael Chan0f31f992006-03-23 01:12:38 -0800410		bp->stats_blk = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700411	}
412	if (bp->tx_desc_ring) {
413		pci_free_consistent(bp->pdev,
414				    sizeof(struct tx_bd) * TX_DESC_CNT,
415				    bp->tx_desc_ring, bp->tx_desc_mapping);
416		bp->tx_desc_ring = NULL;
417	}
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400418	kfree(bp->tx_buf_ring);
419	bp->tx_buf_ring = NULL;
Michael Chan13daffa2006-03-20 17:49:20 -0800420	for (i = 0; i < bp->rx_max_ring; i++) {
421		if (bp->rx_desc_ring[i])
422			pci_free_consistent(bp->pdev,
423					    sizeof(struct rx_bd) * RX_DESC_CNT,
424					    bp->rx_desc_ring[i],
425					    bp->rx_desc_mapping[i]);
426		bp->rx_desc_ring[i] = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700427	}
Michael Chan13daffa2006-03-20 17:49:20 -0800428	vfree(bp->rx_buf_ring);
Jesper Juhlb4558ea2005-10-28 16:53:13 -0400429	bp->rx_buf_ring = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -0700430}
431
/* Allocate all ring and status/statistics memory:
 *  - tx shadow ring (kzalloc) and tx descriptor ring (coherent DMA)
 *  - rx shadow ring (vmalloc, zeroed by hand) and one coherent DMA
 *    descriptor ring per rx ring up to rx_max_ring
 *  - a single coherent DMA allocation holding the status block followed
 *    (cache-line aligned) by the statistics block
 * Returns 0 on success or -ENOMEM; on any failure everything allocated
 * so far is released via bnx2_free_mem(). */
432static int
433bnx2_alloc_mem(struct bnx2 *bp)
434{
Michael Chan0f31f992006-03-23 01:12:38 -0800435	int i, status_blk_size;
Michael Chan13daffa2006-03-20 17:49:20 -0800436
Michael Chan0f31f992006-03-23 01:12:38 -0800437	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
438				  GFP_KERNEL);
Michael Chanb6016b72005-05-26 13:03:09 -0700439	if (bp->tx_buf_ring == NULL)
440		return -ENOMEM;
441
Michael Chanb6016b72005-05-26 13:03:09 -0700442	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443					        sizeof(struct tx_bd) *
444						TX_DESC_CNT,
445						&bp->tx_desc_mapping);
446	if (bp->tx_desc_ring == NULL)
447		goto alloc_mem_err;
448
Michael Chan13daffa2006-03-20 17:49:20 -0800449	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
450				  bp->rx_max_ring);
Michael Chanb6016b72005-05-26 13:03:09 -0700451	if (bp->rx_buf_ring == NULL)
452		goto alloc_mem_err;
453
Michael Chan13daffa2006-03-20 17:49:20 -0800454	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
455				   bp->rx_max_ring);
456
457	for (i = 0; i < bp->rx_max_ring; i++) {
458		bp->rx_desc_ring[i] =
459			pci_alloc_consistent(bp->pdev,
460					     sizeof(struct rx_bd) * RX_DESC_CNT,
461					     &bp->rx_desc_mapping[i]);
462		if (bp->rx_desc_ring[i] == NULL)
463			goto alloc_mem_err;
464
465	}
Michael Chanb6016b72005-05-26 13:03:09 -0700466
Michael Chan0f31f992006-03-23 01:12:38 -0800467	/* Combine status and statistics blocks into one allocation. */
468	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469	bp->status_stats_size = status_blk_size +
470				sizeof(struct statistics_block);
471
472	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
Michael Chanb6016b72005-05-26 13:03:09 -0700473					      &bp->status_blk_mapping);
474	if (bp->status_blk == NULL)
475		goto alloc_mem_err;
476
Michael Chan0f31f992006-03-23 01:12:38 -0800477	memset(bp->status_blk, 0, bp->status_stats_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700478
/* stats block lives right after the (aligned) status block in the same
 * DMA allocation; derive both CPU and bus addresses from the base. */
Michael Chan0f31f992006-03-23 01:12:38 -0800479	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
480				  status_blk_size);
Michael Chanb6016b72005-05-26 13:03:09 -0700481
Michael Chan0f31f992006-03-23 01:12:38 -0800482	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
Michael Chanb6016b72005-05-26 13:03:09 -0700483
484	return 0;
485
486alloc_mem_err:
487	bnx2_free_mem(bp);
488	return -ENOMEM;
489}
490
/* Publish the current link state (speed/duplex/autoneg result) to the
 * bootcode/firmware via the shared-memory BNX2_LINK_STATUS word. */
491static void
Michael Chane3648b32005-11-04 08:51:21 -0800492bnx2_report_fw_link(struct bnx2 *bp)
493{
494	u32 fw_link_status = 0;
495
496	if (bp->link_up) {
497		u32 bmsr;
498
499		switch (bp->line_speed) {
500		case SPEED_10:
501			if (bp->duplex == DUPLEX_HALF)
502				fw_link_status = BNX2_LINK_STATUS_10HALF;
503			else
504				fw_link_status = BNX2_LINK_STATUS_10FULL;
505			break;
506		case SPEED_100:
507			if (bp->duplex == DUPLEX_HALF)
508				fw_link_status = BNX2_LINK_STATUS_100HALF;
509			else
510				fw_link_status = BNX2_LINK_STATUS_100FULL;
511			break;
512		case SPEED_1000:
513			if (bp->duplex == DUPLEX_HALF)
514				fw_link_status = BNX2_LINK_STATUS_1000HALF;
515			else
516				fw_link_status = BNX2_LINK_STATUS_1000FULL;
517			break;
518		case SPEED_2500:
519			if (bp->duplex == DUPLEX_HALF)
520				fw_link_status = BNX2_LINK_STATUS_2500HALF;
521			else
522				fw_link_status = BNX2_LINK_STATUS_2500FULL;
523			break;
524		}
525
526		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
527
528		if (bp->autoneg) {
529			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530
/* BMSR latches link-down; read twice so the second read reflects the
 * current state (standard MII behavior). */
531			bnx2_read_phy(bp, MII_BMSR, &bmsr);
532			bnx2_read_phy(bp, MII_BMSR, &bmsr);
533
534			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537			else
538				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539		}
540	}
541	else
542		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543
544	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545}
546
/* Report link state to the stack (netif_carrier_*) and the kernel log,
 * including speed, duplex and flow-control direction, then mirror the
 * state to firmware via bnx2_report_fw_link(). */
547static void
Michael Chanb6016b72005-05-26 13:03:09 -0700548bnx2_report_link(struct bnx2 *bp)
549{
550	if (bp->link_up) {
551		netif_carrier_on(bp->dev);
552		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553
554		printk("%d Mbps ", bp->line_speed);
555
556		if (bp->duplex == DUPLEX_FULL)
557			printk("full duplex");
558		else
559			printk("half duplex");
560
561		if (bp->flow_ctrl) {
562			if (bp->flow_ctrl & FLOW_CTRL_RX) {
563				printk(", receive ");
564				if (bp->flow_ctrl & FLOW_CTRL_TX)
565					printk("& transmit ");
566			}
567			else {
568				printk(", transmit ");
569			}
570			printk("flow control ON");
571		}
572		printk("\n");
573	}
574	else {
575		netif_carrier_off(bp->dev);
576		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
577	}
Michael Chane3648b32005-11-04 08:51:21 -0800578
579	bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700580}
581
/* Resolve the effective flow-control setting into bp->flow_ctrl.
 * - If speed or flow-control autoneg is off: use the requested setting
 *   (full duplex only).
 * - Half duplex never gets pause.
 * - 5708 SerDes: read the resolved TX/RX pause bits straight from the
 *   BCM5708S 1000X status register.
 * - Otherwise: compare local and partner advertisements (translating the
 *   1000Base-X pause bits to the common form for SerDes) and apply the
 *   802.3ab Table 28B-3 pause-resolution rules. */
582static void
583bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584{
585	u32 local_adv, remote_adv;
586
587	bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400588	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700589		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590
591		if (bp->duplex == DUPLEX_FULL) {
592			bp->flow_ctrl = bp->req_flow_ctrl;
593		}
594		return;
595	}
596
597	if (bp->duplex != DUPLEX_FULL) {
598		return;
599	}
600
Michael Chan5b0c76a2005-11-04 08:45:49 -0800601	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
603		u32 val;
604
605		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607			bp->flow_ctrl |= FLOW_CTRL_TX;
608		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609			bp->flow_ctrl |= FLOW_CTRL_RX;
610		return;
611	}
612
Michael Chanb6016b72005-05-26 13:03:09 -0700613	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614	bnx2_read_phy(bp, MII_LPA, &remote_adv);
615
616	if (bp->phy_flags & PHY_SERDES_FLAG) {
617		u32 new_local_adv = 0;
618		u32 new_remote_adv = 0;
619
620		if (local_adv & ADVERTISE_1000XPAUSE)
621			new_local_adv |= ADVERTISE_PAUSE_CAP;
622		if (local_adv & ADVERTISE_1000XPSE_ASYM)
623			new_local_adv |= ADVERTISE_PAUSE_ASYM;
624		if (remote_adv & ADVERTISE_1000XPAUSE)
625			new_remote_adv |= ADVERTISE_PAUSE_CAP;
626		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628
629		local_adv = new_local_adv;
630		remote_adv = new_remote_adv;
631	}
632
633	/* See Table 28B-3 of 802.3ab-1999 spec. */
634	if (local_adv & ADVERTISE_PAUSE_CAP) {
635		if(local_adv & ADVERTISE_PAUSE_ASYM) {
636	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
637				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638			}
639			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640				bp->flow_ctrl = FLOW_CTRL_RX;
641			}
642		}
643		else {
644			if (remote_adv & ADVERTISE_PAUSE_CAP) {
645				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646			}
647		}
648	}
649	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
652
653			bp->flow_ctrl = FLOW_CTRL_TX;
654		}
655	}
656}
657
/* Link came up on a 5708 SerDes PHY: decode the resolved speed (10M to
 * 2.5G) and duplex from the BCM5708S 1000X status register into
 * bp->line_speed / bp->duplex. Always returns 0. */
658static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800659bnx2_5708s_linkup(struct bnx2 *bp)
660{
661	u32 val;
662
663	bp->link_up = 1;
664	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666	case BCM5708S_1000X_STAT1_SPEED_10:
667		bp->line_speed = SPEED_10;
668		break;
669	case BCM5708S_1000X_STAT1_SPEED_100:
670		bp->line_speed = SPEED_100;
671		break;
672	case BCM5708S_1000X_STAT1_SPEED_1G:
673		bp->line_speed = SPEED_1000;
674		break;
675	case BCM5708S_1000X_STAT1_SPEED_2G5:
676		bp->line_speed = SPEED_2500;
677		break;
678	}
679	if (val & BCM5708S_1000X_STAT1_FD)
680		bp->duplex = DUPLEX_FULL;
681	else
682		bp->duplex = DUPLEX_HALF;
683
684	return 0;
685}
686
/* Link came up on a 5706 SerDes PHY: speed is fixed at 1000 Mbps; duplex
 * comes from BMCR when forced, or from the common local/partner 1000X
 * advertisement bits when autonegotiating. Always returns 0. */
687static int
688bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700689{
690	u32 bmcr, local_adv, remote_adv, common;
691
692	bp->link_up = 1;
693	bp->line_speed = SPEED_1000;
694
695	bnx2_read_phy(bp, MII_BMCR, &bmcr);
696	if (bmcr & BMCR_FULLDPLX) {
697		bp->duplex = DUPLEX_FULL;
698	}
699	else {
700		bp->duplex = DUPLEX_HALF;
701	}
702
703	if (!(bmcr & BMCR_ANENABLE)) {
704		return 0;
705	}
706
707	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708	bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710	common = local_adv & remote_adv;
711	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713		if (common & ADVERTISE_1000XFULL) {
714			bp->duplex = DUPLEX_FULL;
715		}
716		else {
717			bp->duplex = DUPLEX_HALF;
718		}
719	}
720
721	return 0;
722}
723
/* Link came up on a copper PHY: derive speed/duplex.
 * With autoneg enabled, check the 1000Base-T registers first (partner
 * ability bits in STAT1000 sit two bits above the local CTRL1000 bits,
 * hence the >> 2), then fall back to the 100/10 advertisement overlap.
 * If nothing matches, the link is declared down. With autoneg disabled,
 * speed/duplex come straight from BMCR. Always returns 0. */
724static int
725bnx2_copper_linkup(struct bnx2 *bp)
726{
727	u32 bmcr;
728
729	bnx2_read_phy(bp, MII_BMCR, &bmcr);
730	if (bmcr & BMCR_ANENABLE) {
731		u32 local_adv, remote_adv, common;
732
733		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735
736		common = local_adv & (remote_adv >> 2);
737		if (common & ADVERTISE_1000FULL) {
738			bp->line_speed = SPEED_1000;
739			bp->duplex = DUPLEX_FULL;
740		}
741		else if (common & ADVERTISE_1000HALF) {
742			bp->line_speed = SPEED_1000;
743			bp->duplex = DUPLEX_HALF;
744		}
745		else {
746			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747			bnx2_read_phy(bp, MII_LPA, &remote_adv);
748
749			common = local_adv & remote_adv;
750			if (common & ADVERTISE_100FULL) {
751				bp->line_speed = SPEED_100;
752				bp->duplex = DUPLEX_FULL;
753			}
754			else if (common & ADVERTISE_100HALF) {
755				bp->line_speed = SPEED_100;
756				bp->duplex = DUPLEX_HALF;
757			}
758			else if (common & ADVERTISE_10FULL) {
759				bp->line_speed = SPEED_10;
760				bp->duplex = DUPLEX_FULL;
761			}
762			else if (common & ADVERTISE_10HALF) {
763				bp->line_speed = SPEED_10;
764				bp->duplex = DUPLEX_HALF;
765			}
766			else {
767				bp->line_speed = 0;
768				bp->link_up = 0;
769			}
770		}
771	}
772	else {
773		if (bmcr & BMCR_SPEED100) {
774			bp->line_speed = SPEED_100;
775		}
776		else {
777			bp->line_speed = SPEED_10;
778		}
779		if (bmcr & BMCR_FULLDPLX) {
780			bp->duplex = DUPLEX_FULL;
781		}
782		else {
783			bp->duplex = DUPLEX_HALF;
784		}
785	}
786
787	return 0;
788}
789
/* Program the EMAC to match the resolved link parameters:
 * tx inter-packet gap (special value for 1G half duplex), port mode
 * (MII/GMII/25G) by speed, duplex bit, rx/tx PAUSE enables from
 * bp->flow_ctrl, and finally ack the EMAC link-change status bit.
 * Always returns 0. NOTE(review): 0x2620/0x26ff TX_LENGTHS values are
 * magic from the hardware spec — confirm meaning there. */
790static int
791bnx2_set_mac_link(struct bnx2 *bp)
792{
793	u32 val;
794
795	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797		(bp->duplex == DUPLEX_HALF)) {
798		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
799	}
800
801	/* Configure the EMAC mode register. */
802	val = REG_RD(bp, BNX2_EMAC_MODE);
803
804	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800805		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
806		BNX2_EMAC_MODE_25G);
Michael Chanb6016b72005-05-26 13:03:09 -0700807
808	if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800809		switch (bp->line_speed) {
810			case SPEED_10:
811				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812					val |= BNX2_EMAC_MODE_PORT_MII_10;
813					break;
814				}
815				/* fall through */
816			case SPEED_100:
817				val |= BNX2_EMAC_MODE_PORT_MII;
818				break;
819			case SPEED_2500:
820				val |= BNX2_EMAC_MODE_25G;
821				/* fall through */
822			case SPEED_1000:
823				val |= BNX2_EMAC_MODE_PORT_GMII;
824				break;
825		}
Michael Chanb6016b72005-05-26 13:03:09 -0700826	}
827	else {
828		val |= BNX2_EMAC_MODE_PORT_GMII;
829	}
830
831	/* Set the MAC to operate in the appropriate duplex mode. */
832	if (bp->duplex == DUPLEX_HALF)
833		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834	REG_WR(bp, BNX2_EMAC_MODE, val);
835
836	/* Enable/disable rx PAUSE. */
837	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
838
839	if (bp->flow_ctrl & FLOW_CTRL_RX)
840		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
842
843	/* Enable/disable tx PAUSE. */
844	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
846
847	if (bp->flow_ctrl & FLOW_CTRL_TX)
848		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
850
851	/* Acknowledge the interrupt. */
852	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
853
854	return 0;
855}
856
/* Central link-state update. In MAC loopback, link is simply forced up.
 * Otherwise read BMSR (twice — it latches link-down; second read is
 * current), with the 5706 SerDes quirk of trusting the EMAC status link
 * bit instead. On link-up, dispatch to the per-PHY linkup decoder and
 * resolve flow control; on link-down, re-enable autoneg on SerDes if it
 * was forced off and clear the parallel-detect flag. Reports transitions
 * and reprograms the MAC. Always returns 0. */
857static int
858bnx2_set_link(struct bnx2 *bp)
859{
860	u32 bmsr;
861	u8 link_up;
862
863	if (bp->loopback == MAC_LOOPBACK) {
864		bp->link_up = 1;
865		return 0;
866	}
867
868	link_up = bp->link_up;
869
870	bnx2_read_phy(bp, MII_BMSR, &bmsr);
871	bnx2_read_phy(bp, MII_BMSR, &bmsr);
872
873	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
875		u32 val;
876
877		val = REG_RD(bp, BNX2_EMAC_STATUS);
878		if (val & BNX2_EMAC_STATUS_LINK)
879			bmsr |= BMSR_LSTATUS;
880		else
881			bmsr &= ~BMSR_LSTATUS;
882	}
883
884	if (bmsr & BMSR_LSTATUS) {
885		bp->link_up = 1;
886
887		if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800888			if (CHIP_NUM(bp) == CHIP_NUM_5706)
889				bnx2_5706s_linkup(bp);
890			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891				bnx2_5708s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700892		}
893		else {
894			bnx2_copper_linkup(bp);
895		}
896		bnx2_resolve_flow_ctrl(bp);
897	}
898	else {
899		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900			(bp->autoneg & AUTONEG_SPEED)) {
901
902			u32 bmcr;
903
904			bnx2_read_phy(bp, MII_BMCR, &bmcr);
905			if (!(bmcr & BMCR_ANENABLE)) {
906				bnx2_write_phy(bp, MII_BMCR, bmcr |
907					BMCR_ANENABLE);
908			}
909		}
910		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
911		bp->link_up = 0;
912	}
913
914	if (bp->link_up != link_up) {
915		bnx2_report_link(bp);
916	}
917
918	bnx2_set_mac_link(bp);
919
920	return 0;
921}
922
/* Soft-reset the PHY by setting BMCR_RESET, then poll (up to 100 x 10us)
 * for the self-clearing reset bit to drop; a short settle delay follows.
 * Returns 0 on success, -EBUSY if the PHY never came out of reset. */
923static int
924bnx2_reset_phy(struct bnx2 *bp)
925{
926	int i;
927	u32 reg;
928
929        bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
930
931#define PHY_RESET_MAX_WAIT 100
932	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
933		udelay(10);
934
935		bnx2_read_phy(bp, MII_BMCR, &reg);
936		if (!(reg & BMCR_RESET)) {
937			udelay(20);
938			break;
939		}
940	}
941	if (i == PHY_RESET_MAX_WAIT) {
942		return -EBUSY;
943	}
944	return 0;
945}
946
/* Translate the requested flow-control setting (bp->req_flow_ctrl) into
 * the pause advertisement bits to put in MII_ADVERTISE: 1000Base-X pause
 * bits for SerDes PHYs, the common PAUSE_CAP/PAUSE_ASYM bits for copper.
 * Returns 0 if no flow control is requested. */
947static u32
948bnx2_phy_get_pause_adv(struct bnx2 *bp)
949{
950	u32 adv = 0;
951
952	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
953		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
954
955		if (bp->phy_flags & PHY_SERDES_FLAG) {
956			adv = ADVERTISE_1000XPAUSE;
957		}
958		else {
959			adv = ADVERTISE_PAUSE_CAP;
960		}
961	}
962	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
963		if (bp->phy_flags & PHY_SERDES_FLAG) {
964			adv = ADVERTISE_1000XPSE_ASYM;
965		}
966		else {
967			adv = ADVERTISE_PAUSE_ASYM;
968		}
969	}
970	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
971		if (bp->phy_flags & PHY_SERDES_FLAG) {
972			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
973		}
974		else {
975			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
976		}
977	}
978	return adv;
979}
980
/* Configure the SerDes PHY according to the requested settings.
 * Forced-speed path (autoneg speed off): drop the 5708's 2.5G capability
 * bit if set, force 1000 Mbps with the requested duplex, and if anything
 * changed while link was up, briefly restart autoneg with an empty
 * advertisement so the partner sees a link-down transition first.
 * Autoneg path: enable 2.5G capability where supported, build the new
 * advertisement (1000XFULL plus pause bits) and, if it differs from the
 * current one or autoneg was off, restart autonegotiation — again forcing
 * a visible link-down (via ~11ms of loopback) when link was up. On 5706,
 * arm a timer to detect a non-autonegotiating partner quickly.
 * Always returns 0. */
981static int
982bnx2_setup_serdes_phy(struct bnx2 *bp)
983{
Michael Chan5b0c76a2005-11-04 08:45:49 -0800984	u32 adv, bmcr, up1;
Michael Chanb6016b72005-05-26 13:03:09 -0700985	u32 new_adv = 0;
986
987	if (!(bp->autoneg & AUTONEG_SPEED)) {
988		u32 new_bmcr;
Michael Chan5b0c76a2005-11-04 08:45:49 -0800989		int force_link_down = 0;
990
991		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
992			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
993			if (up1 & BCM5708S_UP1_2G5) {
994				up1 &= ~BCM5708S_UP1_2G5;
995				bnx2_write_phy(bp, BCM5708S_UP1, up1);
996				force_link_down = 1;
997			}
998		}
999
1000		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1001		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
Michael Chanb6016b72005-05-26 13:03:09 -07001002
1003		bnx2_read_phy(bp, MII_BMCR, &bmcr);
1004		new_bmcr = bmcr & ~BMCR_ANENABLE;
1005		new_bmcr |= BMCR_SPEED1000;
1006		if (bp->req_duplex == DUPLEX_FULL) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001007			adv |= ADVERTISE_1000XFULL;
Michael Chanb6016b72005-05-26 13:03:09 -07001008			new_bmcr |= BMCR_FULLDPLX;
1009		}
1010		else {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001011			adv |= ADVERTISE_1000XHALF;
Michael Chanb6016b72005-05-26 13:03:09 -07001012			new_bmcr &= ~BMCR_FULLDPLX;
1013		}
Michael Chan5b0c76a2005-11-04 08:45:49 -08001014		if ((new_bmcr != bmcr) || (force_link_down)) {
Michael Chanb6016b72005-05-26 13:03:09 -07001015			/* Force a link down visible on the other side */
1016			if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001017				bnx2_write_phy(bp, MII_ADVERTISE, adv &
1018					       ~(ADVERTISE_1000XFULL |
1019						 ADVERTISE_1000XHALF));
Michael Chanb6016b72005-05-26 13:03:09 -07001020				bnx2_write_phy(bp, MII_BMCR, bmcr |
1021					BMCR_ANRESTART | BMCR_ANENABLE);
1022
1023				bp->link_up = 0;
1024				netif_carrier_off(bp->dev);
Michael Chan5b0c76a2005-11-04 08:45:49 -08001025				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001026			}
Michael Chan5b0c76a2005-11-04 08:45:49 -08001027			bnx2_write_phy(bp, MII_ADVERTISE, adv);
Michael Chanb6016b72005-05-26 13:03:09 -07001028			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1029		}
1030		return 0;
1031	}
1032
Michael Chan5b0c76a2005-11-04 08:45:49 -08001033	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1034		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1035		up1 |= BCM5708S_UP1_2G5;
1036		bnx2_write_phy(bp, BCM5708S_UP1, up1);
1037	}
1038
Michael Chanb6016b72005-05-26 13:03:09 -07001039	if (bp->advertising & ADVERTISED_1000baseT_Full)
1040		new_adv |= ADVERTISE_1000XFULL;
1041
1042	new_adv |= bnx2_phy_get_pause_adv(bp);
1043
1044	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1045	bnx2_read_phy(bp, MII_BMCR, &bmcr);
1046
1047	bp->serdes_an_pending = 0;
1048	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1049		/* Force a link down visible on the other side */
1050		if (bp->link_up) {
1051			int i;
1052
1053			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1054			for (i = 0; i < 110; i++) {
1055				udelay(100);
1056			}
1057		}
1058
1059		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1060		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1061			BMCR_ANENABLE);
Michael Chancd339a02005-08-25 15:35:24 -07001062		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1063			/* Speed up link-up time when the link partner
1064			 * does not autonegotiate which is very common
1065			 * in blade servers. Some blade servers use
1066			 * IPMI for keyboard input and it's important
1067			 * to minimize link disruptions. Autoneg. involves
1068			 * exchanging base pages plus 3 next pages and
1069			 * normally completes in about 120 msec.
1070			 */
1071			bp->current_interval = SERDES_AN_TIMEOUT;
1072			bp->serdes_an_pending = 1;
1073			mod_timer(&bp->timer, jiffies + bp->current_interval);
1074		}
Michael Chanb6016b72005-05-26 13:03:09 -07001075	}
1076
1077	return 0;
1078}
1079
/* Link-mode masks used when validating ethtool speed settings: the full
 * set of modes advertisable on fibre (SerDes) vs. copper PHYs, and the
 * MII advertisement-register bit groups for 10/100 and 1000 Mbps.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1092
/* Program a copper PHY according to the current autoneg/speed/duplex
 * settings in *bp.  With autoneg enabled, reprograms the advertisement
 * registers and restarts autoneg only if something changed; otherwise
 * forces the requested speed/duplex, bouncing the link if needed so the
 * partner re-trains.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage so the comparison below is
		 * meaningful.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			/* Advertisement changed (or autoneg was off):
			 * reprogram and restart autonegotiation.
			 */
			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1187
1188static int
1189bnx2_setup_phy(struct bnx2 *bp)
1190{
1191 if (bp->loopback == MAC_LOOPBACK)
1192 return 0;
1193
1194 if (bp->phy_flags & PHY_SERDES_FLAG) {
1195 return (bnx2_setup_serdes_phy(bp));
1196 }
1197 else {
1198 return (bnx2_setup_copper_phy(bp));
1199 }
1200}
1201
/* One-time initialization of the 5708 SerDes PHY: select IEEE-compliant
 * digital mode, enable fiber mode with auto-detect and PLL early
 * detect, enable 2.5G when capable, and apply board-specific TX
 * amplitude tuning taken from shared hardware configuration.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Registers are banked; BLK_ADDR selects the active bank. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		/* Apply the NVRAM-provided TX control value, but only on
		 * backplane boards.
		 */
		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1255
/* One-time initialization of the 5706 SerDes PHY.  Sets or clears the
 * extended packet length configuration depending on whether the current
 * MTU requires jumbo frames.  The 0x18/0x1c accesses use shadow
 * registers (write selects, read-modify-write updates).  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1291
/* One-time initialization of a copper PHY: apply the CRC workaround
 * register sequence, set or clear the extended packet length bit based
 * on MTU, and enable ethernet@wirespeed.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is set unconditionally just above, so
	 * this test is currently always true — it looks like a hook for
	 * making the workaround conditional later.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-documented fixup sequence via shadow registers. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1334
1335
/* Initialize the PHY after chip reset: select the link-ready interrupt
 * mode, enable the EMAC link attention, reset the PHY, read out its ID,
 * run the chip-specific (5706S/5708S/copper) init routine, then program
 * the current link settings via bnx2_setup_phy().  Returns the init
 * routine's status.  NOTE(review): callers appear to serialize PHY
 * access via bp->phy_lock — confirm at the call sites.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* The 32-bit PHY ID is split across the two PHYSID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1368
1369static int
1370bnx2_set_mac_loopback(struct bnx2 *bp)
1371{
1372 u32 mac_mode;
1373
1374 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1375 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1376 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1377 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1378 bp->link_up = 1;
1379 return 0;
1380}
1381
Michael Chanbc5a0692006-01-23 16:13:22 -08001382static int bnx2_test_link(struct bnx2 *);
1383
/* Put the PHY into loopback at 1000 Mbps full duplex, poll up to 10
 * times for bnx2_test_link() to report link, then configure the MAC
 * for GMII with the loopback/force-link mode bits cleared.  Returns 0
 * or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* phy_lock serializes MDIO access against other PHY users. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		udelay(10);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1413
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for its acknowledgement.
 *
 * @msg_data: message code/data; the driver sequence number is OR'ed in.
 * @silent:   when non-zero, suppress the timeout error printk.
 *
 * Returns 0 on success (or immediately when the message requests no
 * wait), -EBUSY on ack timeout, -EIO when the firmware acks with a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes our sequence number when done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Fire-and-forget messages skip the status check entirely. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1456
1457static void
1458bnx2_init_context(struct bnx2 *bp)
1459{
1460 u32 vcid;
1461
1462 vcid = 96;
1463 while (vcid) {
1464 u32 vcid_addr, pcid_addr, offset;
1465
1466 vcid--;
1467
1468 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1469 u32 new_vcid;
1470
1471 vcid_addr = GET_PCID_ADDR(vcid);
1472 if (vcid & 0x8) {
1473 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1474 }
1475 else {
1476 new_vcid = vcid;
1477 }
1478 pcid_addr = GET_PCID_ADDR(new_vcid);
1479 }
1480 else {
1481 vcid_addr = GET_CID_ADDR(vcid);
1482 pcid_addr = vcid_addr;
1483 }
1484
1485 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1486 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1487
1488 /* Zero out the context. */
1489 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1490 CTX_WR(bp, 0x00, offset, 0);
1491 }
1492
1493 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1494 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1495 }
1496}
1497
/* Work around bad internal RX buffer memory: allocate every mbuf block
 * from the firmware pool, record the good ones (bit 9 clear), then free
 * only the good ones back — the bad blocks stay allocated forever so
 * the hardware never hands them out again.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command for this mbuf block. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1548
1549static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001550bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001551{
1552 u32 val;
1553 u8 *mac_addr = bp->dev->dev_addr;
1554
1555 val = (mac_addr[0] << 8) | mac_addr[1];
1556
1557 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1558
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001559 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001560 (mac_addr[4] << 8) | mac_addr[5];
1561
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1563}
1564
/* Allocate and DMA-map a receive skb for RX ring slot @index, pointing
 * the corresponding rx_bd at the new buffer and advancing the producer
 * byte sequence.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* The BD takes the 64-bit DMA address split into two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1596
1597static void
1598bnx2_phy_int(struct bnx2 *bp)
1599{
1600 u32 new_link_state, old_link_state;
1601
1602 new_link_state = bp->status_blk->status_attn_bits &
1603 STATUS_ATTN_BITS_LINK_STATE;
1604 old_link_state = bp->status_blk->status_attn_bits_ack &
1605 STATUS_ATTN_BITS_LINK_STATE;
1606 if (new_link_state != old_link_state) {
1607 if (new_link_state) {
1608 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1609 STATUS_ATTN_BITS_LINK_STATE);
1610 }
1611 else {
1612 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1613 STATUS_ATTN_BITS_LINK_STATE);
1614 }
1615 bnx2_set_link(bp);
1616 }
1617}
1618
/* Reclaim completed TX buffers: walk the TX ring from the driver's
 * consumer index up to the hardware index in the status block,
 * unmapping DMA and freeing skbs, then wake the TX queue if it was
 * stopped and enough descriptors are free again.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last entry of each ring page is a link BD; skip it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Wait until all BDs of this packet are complete
			 * before reclaiming any of them.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page from its own ring slot. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more completions may have
		 * posted while we were unmapping.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the TX lock to close the race with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1706
/* Recycle an RX buffer: hand the skb (and its DMA mapping and BD
 * address) from the consumer slot back to the producer slot so the
 * hardware can refill it.  Used when a packet had errors, was copied
 * out, or no replacement skb could be allocated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the synced header region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1736
/* Service the RX ring in NAPI context: process completed packets from
 * the driver's consumer index up to the hardware index in the status
 * block, posting skbs to the stack and replenishing ring buffers.
 * Handles at most @budget packets and returns the number processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last entry of each ring page is a link BD; skip it. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header so status/length can be
		 * inspected before deciding to copy or unmap the buffer.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip CRC */

		/* Recycle the buffer on any receive error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: hand this skb up whole. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100),
		 * which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum unless it flagged
			 * an error.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1886
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts; NAPI poll re-enables them when done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Schedule NAPI polling for this device. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1909
/* INTx (legacy, possibly shared) interrupt handler.  Returns IRQ_NONE
 * when the interrupt was not ours (shared line, no new status block).
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts; NAPI poll re-enables them when done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1939
Michael Chanf4e418f2005-11-04 08:53:48 -08001940static inline int
1941bnx2_has_work(struct bnx2 *bp)
1942{
1943 struct status_block *sblk = bp->status_blk;
1944
1945 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1946 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1947 return 1;
1948
1949 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1950 bp->link_up)
1951 return 1;
1952
1953 return 0;
1954}
1955
/* NAPI poll routine.  Services link attention events, TX completions and
 * RX packets within the given budget, then re-arms interrupts once no
 * work remains.  Returns 1 to stay on the poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A link attention is pending when the attention bit differs from
	 * its acknowledged copy in the status block.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than this device's remaining quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we acted on; the rmb() orders this read
	 * before the re-check in bnx2_has_work() below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTA the ack is written twice: first with MASK_INT
		 * still set while the index is updated, then without it to
		 * re-enable the interrupt.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2017
Herbert Xu932ff272006-06-09 12:20:56 -07002018/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002019 * from set_multicast.
2020 */
/* Program the RX filtering hardware (promiscuous, multicast hash, VLAN
 * tag handling) from dev->flags and the device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits recomputed below cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered and ASF
	 * management firmware is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low CRC byte: bits 7:5 select one of
			 * the 8 hash registers, bits 4:0 the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2092
Michael Chanfba9fe92006-06-12 22:21:25 -07002093#define FW_BUF_SIZE 0x8000
2094
2095static int
2096bnx2_gunzip_init(struct bnx2 *bp)
2097{
2098 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2099 goto gunzip_nomem1;
2100
2101 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2102 goto gunzip_nomem2;
2103
2104 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2105 if (bp->strm->workspace == NULL)
2106 goto gunzip_nomem3;
2107
2108 return 0;
2109
2110gunzip_nomem3:
2111 kfree(bp->strm);
2112 bp->strm = NULL;
2113
2114gunzip_nomem2:
2115 vfree(bp->gunzip_buf);
2116 bp->gunzip_buf = NULL;
2117
2118gunzip_nomem1:
2119 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2120 "uncompression.\n", bp->dev->name);
2121 return -ENOMEM;
2122}
2123
2124static void
2125bnx2_gunzip_end(struct bnx2 *bp)
2126{
2127 kfree(bp->strm->workspace);
2128
2129 kfree(bp->strm);
2130 bp->strm = NULL;
2131
2132 if (bp->gunzip_buf) {
2133 vfree(bp->gunzip_buf);
2134 bp->gunzip_buf = NULL;
2135 }
2136}
2137
2138static int
2139bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2140{
2141 int n, rc;
2142
2143 /* check gzip header */
2144 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2145 return -EINVAL;
2146
2147 n = 10;
2148
2149#define FNAME 0x8
2150 if (zbuf[3] & FNAME)
2151 while ((zbuf[n++] != 0) && (n < len));
2152
2153 bp->strm->next_in = zbuf + n;
2154 bp->strm->avail_in = len - n;
2155 bp->strm->next_out = bp->gunzip_buf;
2156 bp->strm->avail_out = FW_BUF_SIZE;
2157
2158 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2159 if (rc != Z_OK)
2160 return rc;
2161
2162 rc = zlib_inflate(bp->strm, Z_FINISH);
2163
2164 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2165 *outbuf = bp->gunzip_buf;
2166
2167 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2168 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2169 bp->dev->name, bp->strm->msg);
2170
2171 zlib_inflateEnd(bp->strm);
2172
2173 if (rc == Z_STREAM_END)
2174 return 0;
2175
2176 return rc;
2177}
2178
Michael Chanb6016b72005-05-26 13:03:09 -07002179static void
2180load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2181 u32 rv2p_proc)
2182{
2183 int i;
2184 u32 val;
2185
2186
2187 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002188 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002189 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002190 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002191 rv2p_code++;
2192
2193 if (rv2p_proc == RV2P_PROC1) {
2194 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2195 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2196 }
2197 else {
2198 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2199 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2200 }
2201 }
2202
2203 /* Reset the processor, un-stall is done later. */
2204 if (rv2p_proc == RV2P_PROC1) {
2205 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2206 }
2207 else {
2208 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2209 }
2210}
2211
2212static void
2213load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2214{
2215 u32 offset;
2216 u32 val;
2217
2218 /* Halt the CPU. */
2219 val = REG_RD_IND(bp, cpu_reg->mode);
2220 val |= cpu_reg->mode_value_halt;
2221 REG_WR_IND(bp, cpu_reg->mode, val);
2222 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2223
2224 /* Load the Text area. */
2225 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2226 if (fw->text) {
2227 int j;
2228
2229 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002230 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002231 }
2232 }
2233
2234 /* Load the Data area. */
2235 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2236 if (fw->data) {
2237 int j;
2238
2239 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2240 REG_WR_IND(bp, offset, fw->data[j]);
2241 }
2242 }
2243
2244 /* Load the SBSS area. */
2245 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2246 if (fw->sbss) {
2247 int j;
2248
2249 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2250 REG_WR_IND(bp, offset, fw->sbss[j]);
2251 }
2252 }
2253
2254 /* Load the BSS area. */
2255 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2256 if (fw->bss) {
2257 int j;
2258
2259 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2260 REG_WR_IND(bp, offset, fw->bss[j]);
2261 }
2262 }
2263
2264 /* Load the Read-Only area. */
2265 offset = cpu_reg->spad_base +
2266 (fw->rodata_addr - cpu_reg->mips_view_base);
2267 if (fw->rodata) {
2268 int j;
2269
2270 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2271 REG_WR_IND(bp, offset, fw->rodata[j]);
2272 }
2273 }
2274
2275 /* Clear the pre-fetch instruction. */
2276 REG_WR_IND(bp, cpu_reg->inst, 0);
2277 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2278
2279 /* Start the CPU. */
2280 val = REG_RD_IND(bp, cpu_reg->mode);
2281 val &= ~cpu_reg->mode_value_halt;
2282 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2283 REG_WR_IND(bp, cpu_reg->mode, val);
2284}
2285
/* Decompress and load firmware into all on-chip processors: both RV2P
 * engines plus the RX, TX, TX patch-up and completion MIPS CPUs.
 * Returns 0 on success or the error from the gunzip helpers.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* All firmware text images are stored gzip-compressed; set up the
	 * shared decompression buffer first.
	 */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is compressed; inflate it into the shared
	 * buffer, which stays valid until bnx2_gunzip_end().
	 */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

init_cpu_err:
	/* Success and failure paths both tear down the gunzip state. */
	bnx2_gunzip_end(bp);
	return rc;
}
2525
/* Transition the chip between PCI power states.  D0 wakes the device;
 * D3hot suspends it, optionally arming magic/ACPI-packet Wake-on-LAN
 * filters.  Returns 0 on success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WoL packet-detection configuration. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Re-negotiate a 10/100 link for WoL, restoring
			 * the user's autoneg settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast + multicast while suspended. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending, unless WoL is
		 * flagged as entirely unsupported on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): on 5706 A0/A1 the D3hot state bits
			 * are only written when WoL is armed — presumably a
			 * chip-rev workaround; confirm against the errata.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2652
2653static int
2654bnx2_acquire_nvram_lock(struct bnx2 *bp)
2655{
2656 u32 val;
2657 int j;
2658
2659 /* Request access to the flash interface. */
2660 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2661 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2662 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2663 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2664 break;
2665
2666 udelay(5);
2667 }
2668
2669 if (j >= NVRAM_TIMEOUT_COUNT)
2670 return -EBUSY;
2671
2672 return 0;
2673}
2674
2675static int
2676bnx2_release_nvram_lock(struct bnx2 *bp)
2677{
2678 int j;
2679 u32 val;
2680
2681 /* Relinquish nvram interface. */
2682 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2683
2684 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2685 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2686 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2687 break;
2688
2689 udelay(5);
2690 }
2691
2692 if (j >= NVRAM_TIMEOUT_COUNT)
2693 return -EBUSY;
2694
2695 return 0;
2696}
2697
2698
2699static int
2700bnx2_enable_nvram_write(struct bnx2 *bp)
2701{
2702 u32 val;
2703
2704 val = REG_RD(bp, BNX2_MISC_CFG);
2705 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2706
2707 if (!bp->flash_info->buffered) {
2708 int j;
2709
2710 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2711 REG_WR(bp, BNX2_NVM_COMMAND,
2712 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2713
2714 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2715 udelay(5);
2716
2717 val = REG_RD(bp, BNX2_NVM_COMMAND);
2718 if (val & BNX2_NVM_COMMAND_DONE)
2719 break;
2720 }
2721
2722 if (j >= NVRAM_TIMEOUT_COUNT)
2723 return -EBUSY;
2724 }
2725 return 0;
2726}
2727
2728static void
2729bnx2_disable_nvram_write(struct bnx2 *bp)
2730{
2731 u32 val;
2732
2733 val = REG_RD(bp, BNX2_MISC_CFG);
2734 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2735}
2736
2737
2738static void
2739bnx2_enable_nvram_access(struct bnx2 *bp)
2740{
2741 u32 val;
2742
2743 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2744 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002745 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002746 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2747}
2748
2749static void
2750bnx2_disable_nvram_access(struct bnx2 *bp)
2751{
2752 u32 val;
2753
2754 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2755 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002756 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002757 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2758 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2759}
2760
2761static int
2762bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2763{
2764 u32 cmd;
2765 int j;
2766
2767 if (bp->flash_info->buffered)
2768 /* Buffered flash, no erase needed */
2769 return 0;
2770
2771 /* Build an erase command */
2772 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2773 BNX2_NVM_COMMAND_DOIT;
2774
2775 /* Need to clear DONE bit separately. */
2776 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2777
2778 /* Address of the NVRAM to read from. */
2779 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2780
2781 /* Issue an erase command. */
2782 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2783
2784 /* Wait for completion. */
2785 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2786 u32 val;
2787
2788 udelay(5);
2789
2790 val = REG_RD(bp, BNX2_NVM_COMMAND);
2791 if (val & BNX2_NVM_COMMAND_DONE)
2792 break;
2793 }
2794
2795 if (j >= NVRAM_TIMEOUT_COUNT)
2796 return -EBUSY;
2797
2798 return 0;
2799}
2800
2801static int
2802bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2803{
2804 u32 cmd;
2805 int j;
2806
2807 /* Build the command word. */
2808 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2809
2810 /* Calculate an offset of a buffered flash. */
2811 if (bp->flash_info->buffered) {
2812 offset = ((offset / bp->flash_info->page_size) <<
2813 bp->flash_info->page_bits) +
2814 (offset % bp->flash_info->page_size);
2815 }
2816
2817 /* Need to clear DONE bit separately. */
2818 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2819
2820 /* Address of the NVRAM to read from. */
2821 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2822
2823 /* Issue a read command. */
2824 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2825
2826 /* Wait for completion. */
2827 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2828 u32 val;
2829
2830 udelay(5);
2831
2832 val = REG_RD(bp, BNX2_NVM_COMMAND);
2833 if (val & BNX2_NVM_COMMAND_DONE) {
2834 val = REG_RD(bp, BNX2_NVM_READ);
2835
2836 val = be32_to_cpu(val);
2837 memcpy(ret_val, &val, 4);
2838 break;
2839 }
2840 }
2841 if (j >= NVRAM_TIMEOUT_COUNT)
2842 return -EBUSY;
2843
2844 return 0;
2845}
2846
2847
2848static int
2849bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2850{
2851 u32 cmd, val32;
2852 int j;
2853
2854 /* Build the command word. */
2855 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2856
2857 /* Calculate an offset of a buffered flash. */
2858 if (bp->flash_info->buffered) {
2859 offset = ((offset / bp->flash_info->page_size) <<
2860 bp->flash_info->page_bits) +
2861 (offset % bp->flash_info->page_size);
2862 }
2863
2864 /* Need to clear DONE bit separately. */
2865 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2866
2867 memcpy(&val32, val, 4);
2868 val32 = cpu_to_be32(val32);
2869
2870 /* Write the data. */
2871 REG_WR(bp, BNX2_NVM_WRITE, val32);
2872
2873 /* Address of the NVRAM to write to. */
2874 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2875
2876 /* Issue the write command. */
2877 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2878
2879 /* Wait for completion. */
2880 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2881 udelay(5);
2882
2883 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2884 break;
2885 }
2886 if (j >= NVRAM_TIMEOUT_COUNT)
2887 return -EBUSY;
2888
2889 return 0;
2890}
2891
/* Identify the attached flash/EEPROM part from the NVM strapping pins,
 * reconfigure the flash interface for it if the bootcode has not done
 * so already, and record the usable flash size.  Returns 0 on success,
 * -ENODEV for an unrecognized part, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured (by the bootcode):
		 * match against the stored config1 values instead of the
		 * raw strapping.
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field encodes the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size reported in shared hw config; fall back to
	 * the table's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2969
2970static int
2971bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2972 int buf_size)
2973{
2974 int rc = 0;
2975 u32 cmd_flags, offset32, len32, extra;
2976
2977 if (buf_size == 0)
2978 return 0;
2979
2980 /* Request access to the flash interface. */
2981 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2982 return rc;
2983
2984 /* Enable access to flash interface */
2985 bnx2_enable_nvram_access(bp);
2986
2987 len32 = buf_size;
2988 offset32 = offset;
2989 extra = 0;
2990
2991 cmd_flags = 0;
2992
2993 if (offset32 & 3) {
2994 u8 buf[4];
2995 u32 pre_len;
2996
2997 offset32 &= ~3;
2998 pre_len = 4 - (offset & 3);
2999
3000 if (pre_len >= len32) {
3001 pre_len = len32;
3002 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3003 BNX2_NVM_COMMAND_LAST;
3004 }
3005 else {
3006 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3007 }
3008
3009 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3010
3011 if (rc)
3012 return rc;
3013
3014 memcpy(ret_buf, buf + (offset & 3), pre_len);
3015
3016 offset32 += 4;
3017 ret_buf += pre_len;
3018 len32 -= pre_len;
3019 }
3020 if (len32 & 3) {
3021 extra = 4 - (len32 & 3);
3022 len32 = (len32 + 4) & ~3;
3023 }
3024
3025 if (len32 == 4) {
3026 u8 buf[4];
3027
3028 if (cmd_flags)
3029 cmd_flags = BNX2_NVM_COMMAND_LAST;
3030 else
3031 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3032 BNX2_NVM_COMMAND_LAST;
3033
3034 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3035
3036 memcpy(ret_buf, buf, 4 - extra);
3037 }
3038 else if (len32 > 0) {
3039 u8 buf[4];
3040
3041 /* Read the first word. */
3042 if (cmd_flags)
3043 cmd_flags = 0;
3044 else
3045 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3046
3047 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3048
3049 /* Advance to the next dword. */
3050 offset32 += 4;
3051 ret_buf += 4;
3052 len32 -= 4;
3053
3054 while (len32 > 4 && rc == 0) {
3055 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3056
3057 /* Advance to the next dword. */
3058 offset32 += 4;
3059 ret_buf += 4;
3060 len32 -= 4;
3061 }
3062
3063 if (rc)
3064 return rc;
3065
3066 cmd_flags = BNX2_NVM_COMMAND_LAST;
3067 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3068
3069 memcpy(ret_buf, buf, 4 - extra);
3070 }
3071
3072 /* Disable access to flash interface */
3073 bnx2_disable_nvram_access(bp);
3074
3075 bnx2_release_nvram_lock(bp);
3076
3077 return rc;
3078}
3079
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the dwords
 * surrounding the target range are read first ('start'/'end') and
 * merged with the caller's data into a temporary buffer.  The write
 * then proceeds one flash page at a time; for non-buffered flash parts
 * (flash_info->buffered == 0) each page is read out in full, erased,
 * and rewritten with the untouched bytes preserved around the new data.
 *
 * The NVRAM lock is acquired and released per page iteration, not for
 * the whole transfer.
 *
 * Returns 0 on success or a negative errno. */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: back up to the dword boundary and fetch the
	 * existing leading bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the existing trailing bytes, unless the
	 * whole request fits in the single dword already covered by the
	 * 'start' read above. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	/* Build a dword-aligned image: saved head + caller data +
	 * saved tail. */
	if (align_start || align_end) {
		/* NOTE(review): 'buf == 0' would read better as
		 * 'buf == NULL'. */
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == 0)
			return -ENOMEM;
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Non-buffered flash needs a scratch page for read-erase-write.
	 * 264 bytes — presumably the largest supported page size for
	 * these parts; confirm against the flash table. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* NOTE(review): kfree(NULL) is a no-op, so both guards below are
	 * redundant and could be dropped. */
	if (bp->flash_info->buffered == 0)
		kfree(flash_buffer);

	if (align_start || align_end)
		kfree(buf);
	return rc;
}
3263
/* Perform a soft reset of the chip core.
 *
 * Sequence: quiesce DMA/coalescing, handshake with the bootcode
 * firmware (WAIT0), deposit the driver-reset signature in shared
 * memory so firmware treats this as a soft reset, issue the core
 * reset through PCICFG_MISC_CONFIG, poll for completion, sanity-check
 * endian configuration, then wait for firmware re-init (WAIT1).
 * 'reset_code' is the BNX2_DRV_MSG_CODE_* value passed through to the
 * firmware handshakes.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * on an endian-mode mismatch, or a firmware-sync error. */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* Early 5706 steppings need extra settle time before polling. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3342
/* Bring the chip from post-reset state to operational: configure DMA
 * byte/word swapping, load the internal CPU firmware, program MAC
 * address, MTU, host-coalescing parameters from bp, and complete the
 * final firmware handshake (WAIT2).  The ordering of register writes
 * follows the hardware bring-up sequence and must not be rearranged.
 *
 * Returns 0 on success or a negative errno from CPU firmware load or
 * the firmware sync. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA swap control; big-endian hosts additionally byte-swap
	 * control transfers. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra bit for 133 MHz PCI-X — meaning of bit 23 not visible
	 * here; presumably a timing workaround. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value
	 * in the high half and the polling value in the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether firmware-side ASF management is enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final firmware handshake before enabling the engines. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3504
3505
/* Reset the software TX ring state and program the TX context in chip
 * context memory: ring type, command type, and the DMA address of the
 * descriptor ring.  Must be called with the ring quiesced (after chip
 * init). */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last descriptor is the chain entry: point it back at the
	 * base of the ring so the hardware wraps. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer bookkeeping. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Give the chip the DMA address of the TX descriptor ring. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3538
/* Initialize the RX descriptor ring(s): size each BD, chain the ring
 * pages into a circular list, program the RX context in chip context
 * memory, pre-fill the ring with allocated skbs, and publish the
 * initial producer index/sequence to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The final BD of each page chains to the next page;
		 * the last page chains back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Give the chip the DMA address of the first ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-post receive buffers; stop early (with a smaller ring) if
	 * skb allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3598
3599static void
Michael Chan13daffa2006-03-20 17:49:20 -08003600bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3601{
3602 u32 num_rings, max;
3603
3604 bp->rx_ring_size = size;
3605 num_rings = 1;
3606 while (size > MAX_RX_DESC_CNT) {
3607 size -= MAX_RX_DESC_CNT;
3608 num_rings++;
3609 }
3610 /* round to next power of 2 */
3611 max = MAX_RX_RINGS;
3612 while ((max & num_rings) == 0)
3613 max >>= 1;
3614
3615 if (num_rings != max)
3616 max <<= 1;
3617
3618 bp->rx_max_ring = max;
3619 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3620}
3621
/* Free every skb still held by the TX ring, unmapping the head buffer
 * and all page fragments first.  Safe to call if the ring was never
 * allocated.  Must only run when the TX path is quiesced. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* No increment here: 'i' is advanced inside the loop so that
	 * the fragment slots following a head descriptor are skipped. */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot (e.g. a fragment slot already accounted
		 * for, or simply unused). */
		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment occupying the following ring
		 * slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and its fragment slots. */
		i += j + 1;
	}

}
3658
3659static void
3660bnx2_free_rx_skbs(struct bnx2 *bp)
3661{
3662 int i;
3663
3664 if (bp->rx_buf_ring == NULL)
3665 return;
3666
Michael Chan13daffa2006-03-20 17:49:20 -08003667 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003668 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3669 struct sk_buff *skb = rx_buf->skb;
3670
Michael Chan05d0f1c2005-11-04 08:53:48 -08003671 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003672 continue;
3673
3674 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3675 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3676
3677 rx_buf->skb = NULL;
3678
Michael Chan745720e2006-06-29 12:37:41 -07003679 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003680 }
3681}
3682
/* Release all TX and RX skbs and their DMA mappings.  Called with the
 * rings quiesced (after chip reset or on device teardown). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3689
/* Reset the chip and rebuild the data path: free any skbs still on the
 * rings (done even if the reset itself failed, so buffers are not
 * leaked), then re-initialize the chip and the TX/RX rings.
 * 'reset_code' is forwarded to the firmware handshake in
 * bnx2_reset_chip().  Returns 0 on success or a negative errno. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free skbs before checking rc so they are reclaimed even when
	 * the reset failed. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3707
/* Full NIC (re)initialization: reset the chip and rings, then bring up
 * the PHY and establish link state.  Returns 0 on success or the
 * errno from the reset path. */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	return 0;
}
3720
/* Self-test of chip registers.  For each table entry: save the
 * register, write all-zeros and then all-ones, and verify that the
 * bits in rw_mask read back as written while the bits in ro_mask keep
 * their saved value.  The register is restored afterwards, including
 * on failure.  Returns 0 on success, -ENODEV on the first mismatch.
 *
 * Note: the 'flags' field of the table is never read by the loop
 * below — presumably reserved for future use. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;
		u16 flags;
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: rw bits must read back zero, ro bits
		 * must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write ones: all rw bits must read back set, ro bits
		 * must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3883
3884static int
3885bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3886{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003887 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003888 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3889 int i;
3890
3891 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3892 u32 offset;
3893
3894 for (offset = 0; offset < size; offset += 4) {
3895
3896 REG_WR_IND(bp, start + offset, test_pattern[i]);
3897
3898 if (REG_RD_IND(bp, start + offset) !=
3899 test_pattern[i]) {
3900 return -ENODEV;
3901 }
3902 }
3903 }
3904 return 0;
3905}
3906
3907static int
3908bnx2_test_memory(struct bnx2 *bp)
3909{
3910 int ret = 0;
3911 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003912 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003913 u32 offset;
3914 u32 len;
3915 } mem_tbl[] = {
3916 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003917 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003918 { 0xe0000, 0x4000 },
3919 { 0x120000, 0x4000 },
3920 { 0x1a0000, 0x4000 },
3921 { 0x160000, 0x4000 },
3922 { 0xffffffff, 0 },
3923 };
3924
3925 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3926 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3927 mem_tbl[i].len)) != 0) {
3928 return ret;
3929 }
3930 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003931
Michael Chanb6016b72005-05-26 13:03:09 -07003932 return ret;
3933}
3934
Michael Chanbc5a0692006-01-23 16:13:22 -08003935#define BNX2_MAC_LOOPBACK 0
3936#define BNX2_PHY_LOOPBACK 1
3937
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	/* Build a single self-addressed frame, queue it on the TX ring in
	 * the requested loopback mode, and verify it comes back on the RX
	 * ring intact.  Returns 0 on success, -EINVAL for an unknown mode,
	 * -ENOMEM on skb allocation failure, -ENODEV on any data mismatch.
	 */
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Test pattern: our MAC address, 8 zero bytes, then a byte ramp. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status-block update (without an interrupt) so we get a
	 * fresh RX consumer index to compare against after the loopback.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand-build one TX descriptor for the whole (linear) frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell: producer index first, then byte sequence. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced status-block update to pick up TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully consumed on the TX side... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	/* NOTE(review): rx_buf_ring is indexed with the raw consumer index,
	 * not RX_RING_IDX(rx_start_idx) -- confirm this cannot wrap here.
	 */
	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The L2 frame header precedes the payload in the RX buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any hardware-reported receive error fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the byte-ramp payload survived the loop intact. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4056
Michael Chanbc5a0692006-01-23 16:13:22 -08004057#define BNX2_MAC_LOOPBACK_FAILED 1
4058#define BNX2_PHY_LOOPBACK_FAILED 2
4059#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4060 BNX2_PHY_LOOPBACK_FAILED)
4061
4062static int
4063bnx2_test_loopback(struct bnx2 *bp)
4064{
4065 int rc = 0;
4066
4067 if (!netif_running(bp->dev))
4068 return BNX2_LOOPBACK_FAILED;
4069
4070 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4071 spin_lock_bh(&bp->phy_lock);
4072 bnx2_init_phy(bp);
4073 spin_unlock_bh(&bp->phy_lock);
4074 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4075 rc |= BNX2_MAC_LOOPBACK_FAILED;
4076 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4077 rc |= BNX2_PHY_LOOPBACK_FAILED;
4078 return rc;
4079}
4080
Michael Chanb6016b72005-05-26 13:03:09 -07004081#define NVRAM_SIZE 0x200
4082#define CRC32_RESIDUAL 0xdebb20e3
4083
4084static int
4085bnx2_test_nvram(struct bnx2 *bp)
4086{
4087 u32 buf[NVRAM_SIZE / 4];
4088 u8 *data = (u8 *) buf;
4089 int rc = 0;
4090 u32 magic, csum;
4091
4092 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4093 goto test_nvram_done;
4094
4095 magic = be32_to_cpu(buf[0]);
4096 if (magic != 0x669955aa) {
4097 rc = -ENODEV;
4098 goto test_nvram_done;
4099 }
4100
4101 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4102 goto test_nvram_done;
4103
4104 csum = ether_crc_le(0x100, data);
4105 if (csum != CRC32_RESIDUAL) {
4106 rc = -ENODEV;
4107 goto test_nvram_done;
4108 }
4109
4110 csum = ether_crc_le(0x100, data + 0x100);
4111 if (csum != CRC32_RESIDUAL) {
4112 rc = -ENODEV;
4113 }
4114
4115test_nvram_done:
4116 return rc;
4117}
4118
4119static int
4120bnx2_test_link(struct bnx2 *bp)
4121{
4122 u32 bmsr;
4123
Michael Chanc770a652005-08-25 15:38:39 -07004124 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004125 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4126 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004127 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004128
Michael Chanb6016b72005-05-26 13:03:09 -07004129 if (bmsr & BMSR_LSTATUS) {
4130 return 0;
4131 }
4132 return -ENODEV;
4133}
4134
4135static int
4136bnx2_test_intr(struct bnx2 *bp)
4137{
4138 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004139 u16 status_idx;
4140
4141 if (!netif_running(bp->dev))
4142 return -ENODEV;
4143
4144 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4145
4146 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004147 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004148 REG_RD(bp, BNX2_HC_COMMAND);
4149
4150 for (i = 0; i < 10; i++) {
4151 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4152 status_idx) {
4153
4154 break;
4155 }
4156
4157 msleep_interruptible(10);
4158 }
4159 if (i < 10)
4160 return 0;
4161
4162 return -ENODEV;
4163}
4164
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	/* Periodic driver timer: sends the firmware keep-alive pulse,
	 * harvests the firmware RX drop counter, and (on 5706 SerDes)
	 * runs the software parallel-detect workaround.
	 */
	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer alive) while interrupts
	 * are disabled via the intr_sem.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to firmware so it knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* 5706 SerDes parallel detection: if autoneg never completes but
	 * a signal is present without a CONFIG exchange, force 1G full
	 * duplex; if CONFIG later appears, go back to autoneg.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Still counting down the autoneg grace period. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* Vendor-specific shadow registers; 0x15 is
				 * read twice to get a stable value.
				 */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Link partner is not autonegotiating:
					 * force 1000/full.
					 */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Partner started sending CONFIG: re-enable autoneg
			 * and drop the parallel-detect override.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4245
4246/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Bring the interface up: power on, allocate rings, attach the
	 * interrupt handler (preferring MSI where supported), init the
	 * chip, and verify MSI delivery before enabling the queue.
	 * Returns 0 on success or a negative errno.
	 */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is known-broken on 5706 A0/A1; also honor the module
	 * parameter that disables it.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init the chip and retry with a shared INTx
			 * line instead of MSI.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4341
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	/* Workqueue handler (scheduled by bnx2_tx_timeout) that resets
	 * and reinitializes the NIC from process context.
	 */
	if (!netif_running(bp->dev))
		return;

	/* in_reset_task is polled by bnx2_close() so it can wait for
	 * this handler to finish instead of flushing the workqueue
	 * (which could deadlock against rtnl_lock).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Leave intr_sem at 1; bnx2_netif_start() re-enables interrupts. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4359
4360static void
4361bnx2_tx_timeout(struct net_device *dev)
4362{
Michael Chan972ec0d2006-01-23 16:12:43 -08004363 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004364
4365 /* This allows the netif to be shutdown gracefully before resetting */
4366 schedule_work(&bp->reset_task);
4367}
4368
4369#ifdef BCM_VLAN
4370/* Called with rtnl_lock */
4371static void
4372bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4373{
Michael Chan972ec0d2006-01-23 16:12:43 -08004374 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004375
4376 bnx2_netif_stop(bp);
4377
4378 bp->vlgrp = vlgrp;
4379 bnx2_set_rx_mode(dev);
4380
4381 bnx2_netif_start(bp);
4382}
4383
4384/* Called with rtnl_lock */
4385static void
4386bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4387{
Michael Chan972ec0d2006-01-23 16:12:43 -08004388 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004389
4390 bnx2_netif_stop(bp);
4391
4392 if (bp->vlgrp)
4393 bp->vlgrp->vlan_devices[vid] = NULL;
4394 bnx2_set_rx_mode(dev);
4395
4396 bnx2_netif_start(bp);
4397}
4398#endif
4399
Herbert Xu932ff272006-06-09 12:20:56 -07004400/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004401 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4402 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004403 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* hard_start_xmit: map the skb (head + page frags) onto TX buffer
	 * descriptors, set checksum/VLAN/TSO flags, and ring the doorbell.
	 * Called with netif_tx_lock held.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		/* Should not happen: the queue is stopped before the ring
		 * can fill (see the wake threshold logic below).
		 */
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#ifdef BCM_TSO
	if ((mss = skb_shinfo(skb)->gso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;

		/* TSO path: the headers below are rewritten, so a cloned
		 * header area must be privatized first.
		 */
		if (skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

		/* Prime IP total length and TCP pseudo-header checksum so
		 * the hardware can fix up each generated segment.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					    skb->nh.iph->daddr,
					    0, IPPROTO_TCP, 0);

		/* Encode IP + TCP option lengths (in dwords) for the chip. */
		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
				(tcp_opt_len >> 2)) << 8;
		}
	}
	else
#endif
	{
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First descriptor covers the linear head; remember the skb and
	 * DMA mapping for completion/unmap in bnx2_tx_int().
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the frame. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring is nearly full; re-wake immediately
	 * if a racing bnx2_tx_int() already freed enough descriptors.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4528
4529/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Bring the interface down: stop traffic, reset the chip with a
	 * WOL-appropriate reset code, release IRQ/MSI and memory, then
	 * drop to D3hot.  Called with rtnl_lock held.
	 */

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code based on wake-on-LAN capability
	 * and configuration.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4564
4565#define GET_NET_STATS64(ctr) \
4566 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4567 (unsigned long) (ctr##_lo)
4568
4569#define GET_NET_STATS32(ctr) \
4570 (ctr##_lo)
4571
4572#if (BITS_PER_LONG == 64)
4573#define GET_NET_STATS GET_NET_STATS64
4574#else
4575#define GET_NET_STATS GET_NET_STATS32
4576#endif
4577
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Translate the DMA'd hardware statistics block into the generic
	 * net_device_stats counters.  GET_NET_STATS combines the hi/lo
	 * 32-bit halves on 64-bit hosts and uses only the low word on
	 * 32-bit hosts.
	 */
	if (bp->stats_blk == NULL) {
		/* Stats block not allocated yet; return last-known values. */
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	/* NOTE(review): multicast is reported from the OUT counter here,
	 * not the IN counter -- confirm this is intentional.
	 */
	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported reliably on 5706 and
	 * 5708 A0 silicon.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the firmware dropped (counter refreshed from
	 * bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4653
4654/* All ethtool functions called with rtnl_lock */
4655
4656static int
4657bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4658{
Michael Chan972ec0d2006-01-23 16:12:43 -08004659 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004660
4661 cmd->supported = SUPPORTED_Autoneg;
4662 if (bp->phy_flags & PHY_SERDES_FLAG) {
4663 cmd->supported |= SUPPORTED_1000baseT_Full |
4664 SUPPORTED_FIBRE;
4665
4666 cmd->port = PORT_FIBRE;
4667 }
4668 else {
4669 cmd->supported |= SUPPORTED_10baseT_Half |
4670 SUPPORTED_10baseT_Full |
4671 SUPPORTED_100baseT_Half |
4672 SUPPORTED_100baseT_Full |
4673 SUPPORTED_1000baseT_Full |
4674 SUPPORTED_TP;
4675
4676 cmd->port = PORT_TP;
4677 }
4678
4679 cmd->advertising = bp->advertising;
4680
4681 if (bp->autoneg & AUTONEG_SPEED) {
4682 cmd->autoneg = AUTONEG_ENABLE;
4683 }
4684 else {
4685 cmd->autoneg = AUTONEG_DISABLE;
4686 }
4687
4688 if (netif_carrier_ok(dev)) {
4689 cmd->speed = bp->line_speed;
4690 cmd->duplex = bp->duplex;
4691 }
4692 else {
4693 cmd->speed = -1;
4694 cmd->duplex = -1;
4695 }
4696
4697 cmd->transceiver = XCVR_INTERNAL;
4698 cmd->phy_address = bp->phy_addr;
4699
4700 return 0;
4701}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004702
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	/* Validate the requested link settings into local copies first,
	 * then commit them to bp and reprogram the PHY.  Returns -EINVAL
	 * for combinations the hardware cannot do.
	 */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes do not exist on SerDes (fibre). */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is never supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the medium
			 * supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex: SerDes only does 1000/full, copper
		 * cannot be forced to 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All inputs validated; commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4774
4775static void
4776bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4777{
Michael Chan972ec0d2006-01-23 16:12:43 -08004778 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004779
4780 strcpy(info->driver, DRV_MODULE_NAME);
4781 strcpy(info->version, DRV_MODULE_VERSION);
4782 strcpy(info->bus_info, pci_name(bp->pdev));
4783 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4784 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4785 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004786 info->fw_version[1] = info->fw_version[3] = '.';
4787 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004788}
4789
Michael Chan244ac4f2006-03-20 17:48:46 -08004790#define BNX2_REGDUMP_LEN (32 * 1024)
4791
static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* Size in bytes of the register dump produced by bnx2_get_regs(). */
	return BNX2_REGDUMP_LEN;
}
4797
4798static void
4799bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4800{
4801 u32 *p = _p, i, offset;
4802 u8 *orig_p = _p;
4803 struct bnx2 *bp = netdev_priv(dev);
4804 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4805 0x0800, 0x0880, 0x0c00, 0x0c10,
4806 0x0c30, 0x0d08, 0x1000, 0x101c,
4807 0x1040, 0x1048, 0x1080, 0x10a4,
4808 0x1400, 0x1490, 0x1498, 0x14f0,
4809 0x1500, 0x155c, 0x1580, 0x15dc,
4810 0x1600, 0x1658, 0x1680, 0x16d8,
4811 0x1800, 0x1820, 0x1840, 0x1854,
4812 0x1880, 0x1894, 0x1900, 0x1984,
4813 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4814 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4815 0x2000, 0x2030, 0x23c0, 0x2400,
4816 0x2800, 0x2820, 0x2830, 0x2850,
4817 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4818 0x3c00, 0x3c94, 0x4000, 0x4010,
4819 0x4080, 0x4090, 0x43c0, 0x4458,
4820 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4821 0x4fc0, 0x5010, 0x53c0, 0x5444,
4822 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4823 0x5fc0, 0x6000, 0x6400, 0x6428,
4824 0x6800, 0x6848, 0x684c, 0x6860,
4825 0x6888, 0x6910, 0x8000 };
4826
4827 regs->version = 0;
4828
4829 memset(p, 0, BNX2_REGDUMP_LEN);
4830
4831 if (!netif_running(bp->dev))
4832 return;
4833
4834 i = 0;
4835 offset = reg_boundaries[0];
4836 p += offset;
4837 while (offset < BNX2_REGDUMP_LEN) {
4838 *p++ = REG_RD(bp, offset);
4839 offset += 4;
4840 if (offset == reg_boundaries[i + 1]) {
4841 offset = reg_boundaries[i + 2];
4842 p = (u32 *) (orig_p + offset);
4843 i += 2;
4844 }
4845 }
4846}
4847
Michael Chanb6016b72005-05-26 13:03:09 -07004848static void
4849bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4850{
Michael Chan972ec0d2006-01-23 16:12:43 -08004851 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004852
4853 if (bp->flags & NO_WOL_FLAG) {
4854 wol->supported = 0;
4855 wol->wolopts = 0;
4856 }
4857 else {
4858 wol->supported = WAKE_MAGIC;
4859 if (bp->wol)
4860 wol->wolopts = WAKE_MAGIC;
4861 else
4862 wol->wolopts = 0;
4863 }
4864 memset(&wol->sopass, 0, sizeof(wol->sopass));
4865}
4866
4867static int
4868bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4869{
Michael Chan972ec0d2006-01-23 16:12:43 -08004870 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004871
4872 if (wol->wolopts & ~WAKE_MAGIC)
4873 return -EINVAL;
4874
4875 if (wol->wolopts & WAKE_MAGIC) {
4876 if (bp->flags & NO_WOL_FLAG)
4877 return -EINVAL;
4878
4879 bp->wol = 1;
4880 }
4881 else {
4882 bp->wol = 0;
4883 }
4884 return 0;
4885}
4886
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Restart autonegotiation.  Only meaningful when autoneg is
	 * enabled; otherwise reject the request.
	 */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock during the 20ms sleep; it is reacquired
		 * before the PHY is touched again.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Arm the 5706 SerDes autoneg timeout handled by
			 * bnx2_timer()'s parallel-detect logic.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4922
4923static int
4924bnx2_get_eeprom_len(struct net_device *dev)
4925{
Michael Chan972ec0d2006-01-23 16:12:43 -08004926 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004927
Michael Chan1122db72006-01-23 16:11:42 -08004928 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004929 return 0;
4930
Michael Chan1122db72006-01-23 16:11:42 -08004931 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004932}
4933
4934static int
4935bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4936 u8 *eebuf)
4937{
Michael Chan972ec0d2006-01-23 16:12:43 -08004938 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004939 int rc;
4940
John W. Linville1064e942005-11-10 12:58:24 -08004941 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004942
4943 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4944
4945 return rc;
4946}
4947
4948static int
4949bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4950 u8 *eebuf)
4951{
Michael Chan972ec0d2006-01-23 16:12:43 -08004952 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004953 int rc;
4954
John W. Linville1064e942005-11-10 12:58:24 -08004955 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004956
4957 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4958
4959 return rc;
4960}
4961
4962static int
4963bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4964{
Michael Chan972ec0d2006-01-23 16:12:43 -08004965 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004966
4967 memset(coal, 0, sizeof(struct ethtool_coalesce));
4968
4969 coal->rx_coalesce_usecs = bp->rx_ticks;
4970 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4971 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4972 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4973
4974 coal->tx_coalesce_usecs = bp->tx_ticks;
4975 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4976 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4977 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4978
4979 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4980
4981 return 0;
4982}
4983
4984static int
4985bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4986{
Michael Chan972ec0d2006-01-23 16:12:43 -08004987 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004988
4989 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4990 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4991
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004992 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07004993 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4994
4995 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4996 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4997
4998 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4999 if (bp->rx_quick_cons_trip_int > 0xff)
5000 bp->rx_quick_cons_trip_int = 0xff;
5001
5002 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5003 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5004
5005 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5006 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5007
5008 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5009 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5010
5011 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5012 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5013 0xff;
5014
5015 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5016 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5017 bp->stats_ticks &= 0xffff00;
5018
5019 if (netif_running(bp->dev)) {
5020 bnx2_netif_stop(bp);
5021 bnx2_init_nic(bp);
5022 bnx2_netif_start(bp);
5023 }
5024
5025 return 0;
5026}
5027
5028static void
5029bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5030{
Michael Chan972ec0d2006-01-23 16:12:43 -08005031 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005032
Michael Chan13daffa2006-03-20 17:49:20 -08005033 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005034 ering->rx_mini_max_pending = 0;
5035 ering->rx_jumbo_max_pending = 0;
5036
5037 ering->rx_pending = bp->rx_ring_size;
5038 ering->rx_mini_pending = 0;
5039 ering->rx_jumbo_pending = 0;
5040
5041 ering->tx_max_pending = MAX_TX_DESC_CNT;
5042 ering->tx_pending = bp->tx_ring_size;
5043}
5044
/* ethtool .set_ringparam handler: resize the RX and TX rings.  If the
 * interface is up, the chip is quiesced and ring memory is released
 * before the new sizes are applied, then everything is reallocated and
 * the NIC restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Reject sizes beyond the descriptor limits; the TX ring must
	 * hold more than MAX_SKB_FRAGS descriptors so a maximally
	 * fragmented skb can always fit.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Quiesce the hardware and free the old rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the old rings were
		 * already freed, so the device is left non-functional.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5078
5079static void
5080bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5081{
Michael Chan972ec0d2006-01-23 16:12:43 -08005082 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005083
5084 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5085 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5086 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5087}
5088
5089static int
5090bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5091{
Michael Chan972ec0d2006-01-23 16:12:43 -08005092 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005093
5094 bp->req_flow_ctrl = 0;
5095 if (epause->rx_pause)
5096 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5097 if (epause->tx_pause)
5098 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5099
5100 if (epause->autoneg) {
5101 bp->autoneg |= AUTONEG_FLOW_CTRL;
5102 }
5103 else {
5104 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5105 }
5106
Michael Chanc770a652005-08-25 15:38:39 -07005107 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005108
5109 bnx2_setup_phy(bp);
5110
Michael Chanc770a652005-08-25 15:38:39 -07005111 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005112
5113 return 0;
5114}
5115
5116static u32
5117bnx2_get_rx_csum(struct net_device *dev)
5118{
Michael Chan972ec0d2006-01-23 16:12:43 -08005119 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005120
5121 return bp->rx_csum;
5122}
5123
5124static int
5125bnx2_set_rx_csum(struct net_device *dev, u32 data)
5126{
Michael Chan972ec0d2006-01-23 16:12:43 -08005127 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005128
5129 bp->rx_csum = data;
5130 return 0;
5131}
5132
Michael Chanb11d6212006-06-29 12:31:21 -07005133static int
5134bnx2_set_tso(struct net_device *dev, u32 data)
5135{
5136 if (data)
5137 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5138 else
5139 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5140 return 0;
5141}
5142
/* Number of hardware statistics exported via ethtool; must match the
 * entry counts of bnx2_stats_str_arr and bnx2_stats_offset_arr.
 */
#define BNX2_NUM_STATS 46

/* Statistics counter names reported for ETH_SS_STATS; the order must
 * match bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5195
/* Convert a statistics_block member offset to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter within the hardware statistics block,
 * in the same order as bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5246
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter byte width (8, 4, or 0 = skip) used for early chip
 * steppings (5706 A0-A2, 5708 A0).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5257
/* Per-counter byte width for later chip revisions; only the
 * stat_IfHCInBadOctets counter remains skipped (width 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5265
/* Number of ethtool self-tests implemented by this driver. */
#define BNX2_NUM_TESTS 6

/* Self-test names reported for ETH_SS_TEST; the order must match the
 * buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5278
/* ethtool .self_test_count handler: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5284
/* ethtool .self_test handler.  The offline tests (register, memory,
 * loopback) are destructive: they stop traffic and reset the chip.
 * The online tests (NVRAM, interrupt, link) run against the live
 * device.  Each buf[] entry is nonzero when the matching test failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Bring the chip back: plain reset if the interface is
		 * down, full re-init and restart otherwise.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			/* Copper links may need extra time to renegotiate. */
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5336
5337static void
5338bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5339{
5340 switch (stringset) {
5341 case ETH_SS_STATS:
5342 memcpy(buf, bnx2_stats_str_arr,
5343 sizeof(bnx2_stats_str_arr));
5344 break;
5345 case ETH_SS_TEST:
5346 memcpy(buf, bnx2_tests_str_arr,
5347 sizeof(bnx2_tests_str_arr));
5348 break;
5349 }
5350}
5351
/* ethtool .get_stats_count handler: number of statistics counters. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5357
/* ethtool .get_ethtool_stats handler: copy hardware counters from the
 * DMA statistics block into buf[].  Counters are 4 or 8 bytes wide
 * depending on the chip; a width of 0 marks counters skipped because
 * of hardware errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		/* Statistics block not allocated; report all zeros. */
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early steppings have a different set of usable counters. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: _hi word first, then the _lo word. */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5398
/* ethtool .phys_id handler: blink the port LED so the adapter can be
 * physically identified.  'data' is the duration in seconds; 0 means
 * the default of 2 seconds.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LED, saving the original mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate LED off/on every 500 ms for 'data' seconds. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Allow the user to interrupt the blinking early. */
		if (signal_pending(current))
			break;
	}
	/* Restore normal hardware LED control. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5432
/* ethtool operations table registered for bnx2 devices. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
5470
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register reads and writes are serialized with phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY writes require admin capability. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5512
5513/* Called with rtnl_lock */
5514static int
5515bnx2_change_mac_addr(struct net_device *dev, void *p)
5516{
5517 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005518 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005519
Michael Chan73eef4c2005-08-25 15:39:15 -07005520 if (!is_valid_ether_addr(addr->sa_data))
5521 return -EINVAL;
5522
Michael Chanb6016b72005-05-26 13:03:09 -07005523 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5524 if (netif_running(dev))
5525 bnx2_set_mac_addr(bp);
5526
5527 return 0;
5528}
5529
5530/* Called with rtnl_lock */
5531static int
5532bnx2_change_mtu(struct net_device *dev, int new_mtu)
5533{
Michael Chan972ec0d2006-01-23 16:12:43 -08005534 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005535
5536 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5537 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5538 return -EINVAL;
5539
5540 dev->mtu = new_mtu;
5541 if (netif_running(dev)) {
5542 bnx2_netif_stop(bp);
5543
5544 bnx2_init_nic(bp);
5545
5546 bnx2_netif_start(bp);
5547 }
5548 return 0;
5549}
5550
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: service the device by invoking the interrupt
 * handler directly with the device IRQ disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5562
5563static int __devinit
5564bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5565{
5566 struct bnx2 *bp;
5567 unsigned long mem_len;
5568 int rc;
5569 u32 reg;
5570
5571 SET_MODULE_OWNER(dev);
5572 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005573 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005574
5575 bp->flags = 0;
5576 bp->phy_flags = 0;
5577
5578 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5579 rc = pci_enable_device(pdev);
5580 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005581 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005582 goto err_out;
5583 }
5584
5585 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005586 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005587 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005588 rc = -ENODEV;
5589 goto err_out_disable;
5590 }
5591
5592 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5593 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005594 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005595 goto err_out_disable;
5596 }
5597
5598 pci_set_master(pdev);
5599
5600 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5601 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005602 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005603 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005604 rc = -EIO;
5605 goto err_out_release;
5606 }
5607
5608 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5609 if (bp->pcix_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005610 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005611 rc = -EIO;
5612 goto err_out_release;
5613 }
5614
5615 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5616 bp->flags |= USING_DAC_FLAG;
5617 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005618 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005619 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005620 rc = -EIO;
5621 goto err_out_release;
5622 }
5623 }
5624 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005625 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005626 rc = -EIO;
5627 goto err_out_release;
5628 }
5629
5630 bp->dev = dev;
5631 bp->pdev = pdev;
5632
5633 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005634 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5635
5636 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5637 mem_len = MB_GET_CID_ADDR(17);
5638 dev->mem_end = dev->mem_start + mem_len;
5639 dev->irq = pdev->irq;
5640
5641 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5642
5643 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005644 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005645 rc = -ENOMEM;
5646 goto err_out_release;
5647 }
5648
5649 /* Configure byte swap and enable write to the reg_window registers.
5650 * Rely on CPU to do target byte swapping on big endian systems
5651 * The chip's target access swapping will not swap all accesses
5652 */
5653 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5654 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5655 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5656
Pavel Machek829ca9a2005-09-03 15:56:56 -07005657 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005658
5659 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5660
Michael Chanb6016b72005-05-26 13:03:09 -07005661 /* Get bus information. */
5662 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5663 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5664 u32 clkreg;
5665
5666 bp->flags |= PCIX_FLAG;
5667
5668 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005669
Michael Chanb6016b72005-05-26 13:03:09 -07005670 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5671 switch (clkreg) {
5672 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5673 bp->bus_speed_mhz = 133;
5674 break;
5675
5676 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5677 bp->bus_speed_mhz = 100;
5678 break;
5679
5680 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5681 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5682 bp->bus_speed_mhz = 66;
5683 break;
5684
5685 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5686 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5687 bp->bus_speed_mhz = 50;
5688 break;
5689
5690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5691 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5692 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5693 bp->bus_speed_mhz = 33;
5694 break;
5695 }
5696 }
5697 else {
5698 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5699 bp->bus_speed_mhz = 66;
5700 else
5701 bp->bus_speed_mhz = 33;
5702 }
5703
5704 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5705 bp->flags |= PCI_32BIT_FLAG;
5706
5707 /* 5706A0 may falsely detect SERR and PERR. */
5708 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5709 reg = REG_RD(bp, PCI_COMMAND);
5710 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5711 REG_WR(bp, PCI_COMMAND, reg);
5712 }
5713 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5714 !(bp->flags & PCIX_FLAG)) {
5715
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005716 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005717 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005718 goto err_out_unmap;
5719 }
5720
5721 bnx2_init_nvram(bp);
5722
Michael Chane3648b32005-11-04 08:51:21 -08005723 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5724
5725 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5726 BNX2_SHM_HDR_SIGNATURE_SIG)
5727 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5728 else
5729 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5730
Michael Chanb6016b72005-05-26 13:03:09 -07005731 /* Get the permanent MAC address. First we need to make sure the
5732 * firmware is actually running.
5733 */
Michael Chane3648b32005-11-04 08:51:21 -08005734 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005735
5736 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5737 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005738 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005739 rc = -ENODEV;
5740 goto err_out_unmap;
5741 }
5742
Michael Chane3648b32005-11-04 08:51:21 -08005743 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005744
Michael Chane3648b32005-11-04 08:51:21 -08005745 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005746 bp->mac_addr[0] = (u8) (reg >> 8);
5747 bp->mac_addr[1] = (u8) reg;
5748
Michael Chane3648b32005-11-04 08:51:21 -08005749 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005750 bp->mac_addr[2] = (u8) (reg >> 24);
5751 bp->mac_addr[3] = (u8) (reg >> 16);
5752 bp->mac_addr[4] = (u8) (reg >> 8);
5753 bp->mac_addr[5] = (u8) reg;
5754
5755 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005756 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005757
5758 bp->rx_csum = 1;
5759
5760 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5761
5762 bp->tx_quick_cons_trip_int = 20;
5763 bp->tx_quick_cons_trip = 20;
5764 bp->tx_ticks_int = 80;
5765 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005766
Michael Chanb6016b72005-05-26 13:03:09 -07005767 bp->rx_quick_cons_trip_int = 6;
5768 bp->rx_quick_cons_trip = 6;
5769 bp->rx_ticks_int = 18;
5770 bp->rx_ticks = 18;
5771
5772 bp->stats_ticks = 1000000 & 0xffff00;
5773
5774 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005775 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005776
Michael Chan5b0c76a2005-11-04 08:45:49 -08005777 bp->phy_addr = 1;
5778
Michael Chanb6016b72005-05-26 13:03:09 -07005779 /* Disable WOL support if we are running on a SERDES chip. */
5780 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5781 bp->phy_flags |= PHY_SERDES_FLAG;
5782 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005783 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5784 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005785 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005786 BNX2_SHARED_HW_CFG_CONFIG);
5787 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5788 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5789 }
Michael Chanb6016b72005-05-26 13:03:09 -07005790 }
5791
Michael Chan16088272006-06-12 22:16:43 -07005792 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5793 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5794 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005795 bp->flags |= NO_WOL_FLAG;
5796
Michael Chanb6016b72005-05-26 13:03:09 -07005797 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5798 bp->tx_quick_cons_trip_int =
5799 bp->tx_quick_cons_trip;
5800 bp->tx_ticks_int = bp->tx_ticks;
5801 bp->rx_quick_cons_trip_int =
5802 bp->rx_quick_cons_trip;
5803 bp->rx_ticks_int = bp->rx_ticks;
5804 bp->comp_prod_trip_int = bp->comp_prod_trip;
5805 bp->com_ticks_int = bp->com_ticks;
5806 bp->cmd_ticks_int = bp->cmd_ticks;
5807 }
5808
Michael Chanf9317a42006-09-29 17:06:23 -07005809 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5810 *
5811 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5812 * with byte enables disabled on the unused 32-bit word. This is legal
5813 * but causes problems on the AMD 8132 which will eventually stop
5814 * responding after a while.
5815 *
5816 * AMD believes this incompatibility is unique to the 5706, and
5817 * prefers to locally disable MSI rather than globally disabling it
5818 * using pci_msi_quirk.
5819 */
5820 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5821 struct pci_dev *amd_8132 = NULL;
5822
5823 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5824 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5825 amd_8132))) {
5826 u8 rev;
5827
5828 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5829 if (rev >= 0x10 && rev <= 0x13) {
5830 disable_msi = 1;
5831 pci_dev_put(amd_8132);
5832 break;
5833 }
5834 }
5835 }
5836
Michael Chanb6016b72005-05-26 13:03:09 -07005837 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5838 bp->req_line_speed = 0;
5839 if (bp->phy_flags & PHY_SERDES_FLAG) {
5840 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005841
Michael Chane3648b32005-11-04 08:51:21 -08005842 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005843 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5844 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5845 bp->autoneg = 0;
5846 bp->req_line_speed = bp->line_speed = SPEED_1000;
5847 bp->req_duplex = DUPLEX_FULL;
5848 }
Michael Chanb6016b72005-05-26 13:03:09 -07005849 }
5850 else {
5851 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5852 }
5853
5854 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5855
Michael Chancd339a02005-08-25 15:35:24 -07005856 init_timer(&bp->timer);
5857 bp->timer.expires = RUN_AT(bp->timer_interval);
5858 bp->timer.data = (unsigned long) bp;
5859 bp->timer.function = bnx2_timer;
5860
Michael Chanb6016b72005-05-26 13:03:09 -07005861 return 0;
5862
5863err_out_unmap:
5864 if (bp->regview) {
5865 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005866 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005867 }
5868
5869err_out_release:
5870 pci_release_regions(pdev);
5871
5872err_out_disable:
5873 pci_disable_device(pdev);
5874 pci_set_drvdata(pdev, NULL);
5875
5876err_out:
5877 return rc;
5878}
5879
5880static int __devinit
5881bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5882{
5883 static int version_printed = 0;
5884 struct net_device *dev = NULL;
5885 struct bnx2 *bp;
5886 int rc, i;
5887
5888 if (version_printed++ == 0)
5889 printk(KERN_INFO "%s", version);
5890
5891 /* dev zeroed in init_etherdev */
5892 dev = alloc_etherdev(sizeof(*bp));
5893
5894 if (!dev)
5895 return -ENOMEM;
5896
5897 rc = bnx2_init_board(pdev, dev);
5898 if (rc < 0) {
5899 free_netdev(dev);
5900 return rc;
5901 }
5902
5903 dev->open = bnx2_open;
5904 dev->hard_start_xmit = bnx2_start_xmit;
5905 dev->stop = bnx2_close;
5906 dev->get_stats = bnx2_get_stats;
5907 dev->set_multicast_list = bnx2_set_rx_mode;
5908 dev->do_ioctl = bnx2_ioctl;
5909 dev->set_mac_address = bnx2_change_mac_addr;
5910 dev->change_mtu = bnx2_change_mtu;
5911 dev->tx_timeout = bnx2_tx_timeout;
5912 dev->watchdog_timeo = TX_TIMEOUT;
5913#ifdef BCM_VLAN
5914 dev->vlan_rx_register = bnx2_vlan_rx_register;
5915 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5916#endif
5917 dev->poll = bnx2_poll;
5918 dev->ethtool_ops = &bnx2_ethtool_ops;
5919 dev->weight = 64;
5920
Michael Chan972ec0d2006-01-23 16:12:43 -08005921 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005922
5923#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5924 dev->poll_controller = poll_bnx2;
5925#endif
5926
5927 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005928 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005929 if (bp->regview)
5930 iounmap(bp->regview);
5931 pci_release_regions(pdev);
5932 pci_disable_device(pdev);
5933 pci_set_drvdata(pdev, NULL);
5934 free_netdev(dev);
5935 return rc;
5936 }
5937
5938 pci_set_drvdata(pdev, dev);
5939
5940 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005941 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005942 bp->name = board_info[ent->driver_data].name,
5943 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5944 "IRQ %d, ",
5945 dev->name,
5946 bp->name,
5947 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5948 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5949 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5950 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5951 bp->bus_speed_mhz,
5952 dev->base_addr,
5953 bp->pdev->irq);
5954
5955 printk("node addr ");
5956 for (i = 0; i < 6; i++)
5957 printk("%2.2x", dev->dev_addr[i]);
5958 printk("\n");
5959
5960 dev->features |= NETIF_F_SG;
5961 if (bp->flags & USING_DAC_FLAG)
5962 dev->features |= NETIF_F_HIGHDMA;
5963 dev->features |= NETIF_F_IP_CSUM;
5964#ifdef BCM_VLAN
5965 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5966#endif
5967#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07005968 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07005969#endif
5970
5971 netif_carrier_off(bp->dev);
5972
5973 return 0;
5974}
5975
5976static void __devexit
5977bnx2_remove_one(struct pci_dev *pdev)
5978{
5979 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005980 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005981
Michael Chanafdc08b2005-08-25 15:34:29 -07005982 flush_scheduled_work();
5983
Michael Chanb6016b72005-05-26 13:03:09 -07005984 unregister_netdev(dev);
5985
5986 if (bp->regview)
5987 iounmap(bp->regview);
5988
5989 free_netdev(dev);
5990 pci_release_regions(pdev);
5991 pci_disable_device(pdev);
5992 pci_set_drvdata(pdev, NULL);
5993}
5994
/* PCI power-management suspend hook.
 *
 * Quiesces the interface, tells the firmware how it is being brought
 * down (so Wake-on-LAN state is preserved when configured), and drops
 * the chip into the PCI power state chosen for @state.
 * Returns 0 (the teardown steps here cannot fail).
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Order matters: finish deferred work, stop traffic, detach from
	 * the stack, then kill the periodic timer before resetting.
	 */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code: link-down unload when WOL is not
	 * supported, otherwise suspend with or without wake enabled.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6020
6021static int
6022bnx2_resume(struct pci_dev *pdev)
6023{
6024 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006025 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006026
6027 if (!netif_running(dev))
6028 return 0;
6029
Pavel Machek829ca9a2005-09-03 15:56:56 -07006030 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006031 netif_device_attach(dev);
6032 bnx2_init_nic(bp);
6033 bnx2_netif_start(bp);
6034 return 0;
6035}
6036
/* PCI driver glue: device table plus probe/remove and PM callbacks. */
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
6045
6046static int __init bnx2_init(void)
6047{
Jeff Garzik29917622006-08-19 17:48:59 -04006048 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006049}
6050
/* Module unload entry point: unregistering the pci_driver triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6055
/* Register module load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6058
6059
6060