blob: 0a46b45f21dd39cfb7d8cb785f351122e777ac0a [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080051#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070052#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080053
Michael Chanb6016b72005-05-26 13:03:09 -070054#include "bnx2.h"
55#include "bnx2_fw.h"
56
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
Michael Chanf9317a42006-09-29 17:06:23 -070059#define DRV_MODULE_VERSION "1.4.45"
60#define DRV_MODULE_RELDATE "September 29, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070061
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
Randy Dunlape19360f2006-04-10 23:22:06 -070067static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070068 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080071MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070072MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types supported by this driver; values index board_info[] below
 * and appear in the driver_data field of bnx2_pci_tbl[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM variant of the 5706 (copper) */
	NC370I,		/* HP OEM variant of the 5706 (copper) */
	BCM5706S,
	NC370F,		/* HP OEM variant of the 5706 (fiber) */
	BCM5708,
	BCM5708S,
} board_t;
89
/* Human-readable adapter names, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI IDs this driver binds to.  HP OEM boards are matched first by
 * subsystem vendor/device so they get their OEM name; the PCI_ANY_ID
 * entries catch the generic Broadcom parts.  driver_data is a board_t. */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* NVRAM device descriptors.  Each entry is a struct flash_spec:
 * {strap mask/value, config1, config2, config3, write1,
 *  buffered flag, page bits, page size,
 *  byte addr mask, total size, name}.
 * The "Expansion entry" rows are placeholders for strap encodings with no
 * known device; their totals are 0.  NOTE(review): field meanings inferred
 * from member order in struct flash_spec (bnx2.h) — confirm against header. */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
207
208MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
Michael Chane89bbf12005-08-25 15:36:58 -0700210static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211{
Michael Chan2f8af122006-08-15 01:39:10 -0700212 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700213
Michael Chan2f8af122006-08-15 01:39:10 -0700214 smp_mb();
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
Michael Chane89bbf12005-08-25 15:36:58 -0700216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
219}
220
Michael Chanb6016b72005-05-26 13:03:09 -0700221static u32
222bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
223{
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
226}
227
/* Write @val to a chip register indirectly through the PCICFG register
 * window (counterpart of bnx2_reg_rd_ind). */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
/* Write @val into on-chip context memory at @cid_addr + @offset via the
 * CTX data address/data register pair. */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
242
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the EMAC is auto-polling the PHY, auto-poll is temporarily turned
 * off around the manual transaction and re-enabled afterwards (with a
 * 40us settling delay each way).  The read command is issued through
 * the MDIO_COMM register and polled up to 50 times at 10us intervals
 * for the START_BUSY bit to clear.
 *
 * Returns 0 on success, -EBUSY (with *val forced to 0) if the
 * transaction never completed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we drive MDIO by hand. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Build and issue the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data and mask to 16 bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Timed out: still busy after all polls. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
299
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy: auto-poll is suspended around the manual
 * transaction if enabled, the write command is issued through
 * MDIO_COMM, and completion is polled up to 50 times at 10us intervals.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we drive MDIO by hand. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Build and issue the MDIO write command (data in low 16 bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
348
/* Mask chip interrupts via the INT_ACK_CMD register; the trailing read
 * flushes the posted write so masking takes effect before returning. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
356
/* Unmask chip interrupts.  The first write acknowledges up to
 * last_status_idx while still masked, the second unmasks; then the host
 * coalescing block is kicked (COAL_NOW) so any pending events generate
 * an interrupt immediately. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
369
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable intent;
 * bnx2_netif_start() decrements it again. */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
377
/* Quiesce the interface: synchronously disable interrupts, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the stall
 * does not trigger a spurious tx watchdog timeout. */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
388
/* Undo bnx2_netif_stop().  Interrupts are only re-enabled when the
 * intr_sem count drops to zero, so nested stop/start pairs balance. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
400
/* Release all ring and status-block memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially allocated state (used as the error-unwind
 * path); every pointer is NULLed after freeing.  The status block and
 * statistics block share one DMA allocation, so freeing status_blk also
 * invalidates stats_blk. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;	/* carved out of the same allocation */
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree/vfree accept NULL, so no guards are needed here. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
431
/* Allocate all host memory the device needs: tx/rx software rings,
 * tx/rx descriptor rings (DMA-coherent), and a combined status +
 * statistics block.  On any failure everything already allocated is
 * unwound via bnx2_free_mem().
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Software tx ring (host-only bookkeeping, zeroed). */
	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	/* DMA-coherent tx descriptor ring shared with the chip. */
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* Software rx ring; vmalloc because rx_max_ring pages may be large. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	/* One DMA-coherent descriptor page per rx ring. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives immediately after the cache-aligned status block */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
490
491static void
Michael Chane3648b32005-11-04 08:51:21 -0800492bnx2_report_fw_link(struct bnx2 *bp)
493{
494 u32 fw_link_status = 0;
495
496 if (bp->link_up) {
497 u32 bmsr;
498
499 switch (bp->line_speed) {
500 case SPEED_10:
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
503 else
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
505 break;
506 case SPEED_100:
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
509 else
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
511 break;
512 case SPEED_1000:
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
515 else
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
517 break;
518 case SPEED_2500:
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
521 else
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
523 break;
524 }
525
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
527
528 if (bp->autoneg) {
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
533
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537 else
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539 }
540 }
541 else
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545}
546
547static void
Michael Chanb6016b72005-05-26 13:03:09 -0700548bnx2_report_link(struct bnx2 *bp)
549{
550 if (bp->link_up) {
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553
554 printk("%d Mbps ", bp->line_speed);
555
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
558 else
559 printk("half duplex");
560
561 if (bp->flow_ctrl) {
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
566 }
567 else {
568 printk(", transmit ");
569 }
570 printk("flow control ON");
571 }
572 printk("\n");
573 }
574 else {
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
577 }
Michael Chane3648b32005-11-04 08:51:21 -0800578
579 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700580}
581
582static void
583bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584{
585 u32 local_adv, remote_adv;
586
587 bp->flow_ctrl = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
Michael Chanb6016b72005-05-26 13:03:09 -0700589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
593 }
594 return;
595 }
596
597 if (bp->duplex != DUPLEX_FULL) {
598 return;
599 }
600
Michael Chan5b0c76a2005-11-04 08:45:49 -0800601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
603 u32 val;
604
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
610 return;
611 }
612
Michael Chanb6016b72005-05-26 13:03:09 -0700613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
615
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
619
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
631 }
632
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638 }
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
641 }
642 }
643 else {
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646 }
647 }
648 }
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
652
653 bp->flow_ctrl = FLOW_CTRL_TX;
654 }
655 }
656}
657
658static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800659bnx2_5708s_linkup(struct bnx2 *bp)
660{
661 u32 val;
662
663 bp->link_up = 1;
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
668 break;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
671 break;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
674 break;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
677 break;
678 }
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
681 else
682 bp->duplex = DUPLEX_HALF;
683
684 return 0;
685}
686
687static int
688bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700689{
690 u32 bmcr, local_adv, remote_adv, common;
691
692 bp->link_up = 1;
693 bp->line_speed = SPEED_1000;
694
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
698 }
699 else {
700 bp->duplex = DUPLEX_HALF;
701 }
702
703 if (!(bmcr & BMCR_ANENABLE)) {
704 return 0;
705 }
706
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
715 }
716 else {
717 bp->duplex = DUPLEX_HALF;
718 }
719 }
720
721 return 0;
722}
723
724static int
725bnx2_copper_linkup(struct bnx2 *bp)
726{
727 u32 bmcr;
728
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
732
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
740 }
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
744 }
745 else {
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
748
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
753 }
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
757 }
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
761 }
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
765 }
766 else {
767 bp->line_speed = 0;
768 bp->link_up = 0;
769 }
770 }
771 }
772 else {
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
775 }
776 else {
777 bp->line_speed = SPEED_10;
778 }
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
781 }
782 else {
783 bp->duplex = DUPLEX_HALF;
784 }
785 }
786
787 return 0;
788}
789
790static int
791bnx2_set_mac_link(struct bnx2 *bp)
792{
793 u32 val;
794
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
799 }
800
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
803
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
Michael Chan5b0c76a2005-11-04 08:45:49 -0800805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
806 BNX2_EMAC_MODE_25G);
Michael Chanb6016b72005-05-26 13:03:09 -0700807
808 if (bp->link_up) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800809 switch (bp->line_speed) {
810 case SPEED_10:
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
813 break;
814 }
815 /* fall through */
816 case SPEED_100:
817 val |= BNX2_EMAC_MODE_PORT_MII;
818 break;
819 case SPEED_2500:
820 val |= BNX2_EMAC_MODE_25G;
821 /* fall through */
822 case SPEED_1000:
823 val |= BNX2_EMAC_MODE_PORT_GMII;
824 break;
825 }
Michael Chanb6016b72005-05-26 13:03:09 -0700826 }
827 else {
828 val |= BNX2_EMAC_MODE_PORT_GMII;
829 }
830
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
835
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
838
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
842
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
846
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
850
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
853
854 return 0;
855}
856
857static int
858bnx2_set_link(struct bnx2 *bp)
859{
860 u32 bmsr;
861 u8 link_up;
862
Michael Chan80be4432006-11-19 14:07:28 -0800863 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
Michael Chanb6016b72005-05-26 13:03:09 -0700864 bp->link_up = 1;
865 return 0;
866 }
867
868 link_up = bp->link_up;
869
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
872
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
875 u32 val;
876
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
880 else
881 bmsr &= ~BMSR_LSTATUS;
882 }
883
884 if (bmsr & BMSR_LSTATUS) {
885 bp->link_up = 1;
886
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -0800888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700892 }
893 else {
894 bnx2_copper_linkup(bp);
895 }
896 bnx2_resolve_flow_ctrl(bp);
897 }
898 else {
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
901
902 u32 bmcr;
903
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
Michael Chan80be4432006-11-19 14:07:28 -0800905 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
Michael Chanb6016b72005-05-26 13:03:09 -0700906 if (!(bmcr & BMCR_ANENABLE)) {
907 bnx2_write_phy(bp, MII_BMCR, bmcr |
908 BMCR_ANENABLE);
909 }
910 }
911 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
912 bp->link_up = 0;
913 }
914
915 if (bp->link_up != link_up) {
916 bnx2_report_link(bp);
917 }
918
919 bnx2_set_mac_link(bp);
920
921 return 0;
922}
923
924static int
925bnx2_reset_phy(struct bnx2 *bp)
926{
927 int i;
928 u32 reg;
929
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
931
932#define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
934 udelay(10);
935
936 bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
938 udelay(20);
939 break;
940 }
941 }
942 if (i == PHY_RESET_MAX_WAIT) {
943 return -EBUSY;
944 }
945 return 0;
946}
947
948static u32
949bnx2_phy_get_pause_adv(struct bnx2 *bp)
950{
951 u32 adv = 0;
952
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
958 }
959 else {
960 adv = ADVERTISE_PAUSE_CAP;
961 }
962 }
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
966 }
967 else {
968 adv = ADVERTISE_PAUSE_ASYM;
969 }
970 }
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974 }
975 else {
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
977 }
978 }
979 return adv;
980}
981
/* Program the SerDes (fiber) PHY according to the requested autoneg /
 * forced-speed settings in *bp.
 *
 * Forced-speed path: builds a new BMCR (optionally with the 5708's
 * 2.5G force bit) and, if anything changed, bounces the link so the
 * partner sees the transition before the new mode is applied.
 * Autoneg path: computes the 1000Base-X advertisement word and restarts
 * autonegotiation only when it differs from what is already programmed.
 *
 * Caller must hold bp->phy_lock (it is dropped/re-taken around msleep()
 * in the autoneg branch).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				/* Enable 2.5G capability; changing it
				 * requires a link bounce below.
				 */
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				/* Not forcing 2.5G: turn the capability
				 * off again and bounce the link.
				 */
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Momentarily advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced mode.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiated speed from here on. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock is held by the caller; drop it across
			 * the sleep.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1085
/* Masks of ethtool ADVERTISED_* bits supported per media type. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* Masks of MII advertisement register (ADVERTISE_*) bits for 10/100
 * (MII_ADVERTISE) and 1000 Mb (MII_CTRL1000) respectively.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1098
/* Program the copper (twisted-pair) PHY from the settings in *bp.
 *
 * Autoneg path: rebuilds the 10/100 (MII_ADVERTISE) and 1000 Mb
 * (MII_CTRL1000) advertisement registers and restarts autoneg only when
 * something actually changed; otherwise just re-resolves flow control.
 * Forced path: writes a new BMCR, bouncing the link first if it is up.
 *
 * Caller must hold bp->phy_lock (dropped/re-taken around msleep()).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we manage when comparing
		 * against the newly computed advertisement.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex: BMCR with no speed bit set means 10 Mb. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR is read twice; the MII link-status bit is
		 * latched, so the second read reflects current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1192
1193static int
1194bnx2_setup_phy(struct bnx2 *bp)
1195{
1196 if (bp->loopback == MAC_LOOPBACK)
1197 return 0;
1198
1199 if (bp->phy_flags & PHY_SERDES_FLAG) {
1200 return (bnx2_setup_serdes_phy(bp));
1201 }
1202 else {
1203 return (bnx2_setup_copper_phy(bp));
1204 }
1205}
1206
/* One-time initialization of the BCM5708 SerDes PHY: selects IEEE
 * register mapping, enables fiber mode with auto-detection and PLL
 * early-link detect, optionally enables 2.5G capability, applies a TX
 * amplitude tweak on early chip revisions, and programs a TX control
 * value from shared-memory config on backplane boards.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Registers on this PHY are banked; BCM5708S_BLK_ADDR selects
	 * the active block before each group of accesses.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 value in the shared-memory port config is a
	 * board-specific TX control override.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1260
/* One-time initialization of the BCM5706 SerDes PHY.  Clears the
 * parallel-detect flag and programs undocumented PHY registers
 * (0x18/0x1c) differently for jumbo (> 1500 byte MTU) versus standard
 * frames.  The register/value pairs are vendor-specified magic; do not
 * change them without hardware documentation.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1296
/* One-time initialization of the copper PHY: applies a CRC workaround
 * sequence, configures extended packet length for jumbo MTUs, and
 * enables ethernet@wirespeed.  The shadow-register writes (0x15/0x17/
 * 0x18/0x1c) are vendor-specified magic values.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): this test is always true because the flag is set
	 * unconditionally just above; kept as-is so the workaround can be
	 * made conditional again easily.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1339
1340
/* Top-level PHY init: selects link-ready interrupt mode, resets the
 * PHY, reads its ID, dispatches to the chip-specific SerDes or copper
 * init routine, then applies the current settings via bnx2_setup_phy().
 * Returns the status of the chip-specific init routine (0 if no
 * SerDes init matched).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two 16-bit ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1373
1374static int
1375bnx2_set_mac_loopback(struct bnx2 *bp)
1376{
1377 u32 mac_mode;
1378
1379 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1380 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1381 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1382 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1383 bp->link_up = 1;
1384 return 0;
1385}
1386
/* Forward declaration; defined later in this file. */
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000 Mb full duplex, wait up to ~1 s
 * (10 x 100 ms polls of bnx2_test_link()) for it to settle, then
 * program the MAC for GMII with no forced link.  Returns the PHY write
 * status if it fails, otherwise 0.  Takes bp->phy_lock internally, so
 * the caller must not hold it.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/duplex/force bits in the MAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1418
/* Post a message to the bootcode through the shared-memory driver
 * mailbox and poll (10 ms steps, up to FW_ACK_TIME_OUT_MS) for the
 * matching acknowledgement.
 *
 * @msg_data: message code/data; a fresh sequence number is OR'ed in.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success or for WAIT0 messages, -EBUSY on ACK timeout
 * (after notifying the firmware of the timeout), -EIO if the firmware
 * reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag this request with a new sequence number so its ACK can be
	 * distinguished from previous ones.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed regardless of the ACK outcome. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1461
/* Zero the on-chip context memory for all 96 connection IDs.  For each
 * CID the corresponding physical context page is mapped in via
 * BNX2_CTX_PAGE_TBL, cleared word by word, then the virtual address is
 * restored.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* 5706 A0 uses a remapped virtual->physical CID
			 * layout; recompute the physical CID for CIDs
			 * with bit 3 set.  Presumably a chip erratum --
			 * confirm against the 5706 A0 documentation.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1502
/* Work around bad on-chip RX buffer memory: drain the firmware's mbuf
 * allocator, remember every buffer whose address does not have the
 * "bad block" marker (bit 9), and free only those back -- the bad
 * buffers are deliberately leaked so the hardware never hands them
 * out again.  Returns 0 on success, -ENOMEM if the scratch array
 * cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer value into the free-command format. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1553
1554static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001555bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001556{
1557 u32 val;
1558 u8 *mac_addr = bp->dev->dev_addr;
1559
1560 val = (mac_addr[0] << 8) | mac_addr[1];
1561
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1563
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001565 (mac_addr[4] << 8) | mac_addr[5];
1566
1567 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1568}
1569
/* Allocate and DMA-map a fresh receive skb for ring slot @index, fill
 * in the corresponding hardware rx_bd with its bus address, and advance
 * the producer byte-sequence counter.  Returns 0 on success, -ENOMEM
 * if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Pad the data pointer up to the next 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hardware takes the 64-bit bus address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1601
/* Handle a PHY/link attention: compare the asserted attention bit in
 * the status block against its acknowledged copy, acknowledge the
 * change through the set/clear command registers, and re-resolve the
 * link state.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		/* Acknowledge by writing the link-state bit to the
		 * matching set or clear command register.
		 */
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1623
/* Reclaim completed TX descriptors up to the consumer index reported in
 * the status block: unmap each skb's head and fragments, free the skb,
 * and wake the TX queue if it was stopped and enough descriptors are
 * now free.  Consumer indexes skip the last entry of each ring page,
 * hence the MAX_TX_DESC_CNT adjustments.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer until every BD of this packet has
			 * completed (signed compare handles wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample the hardware index so newly completed
		 * packets are reclaimed in the same pass.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with the
	 * transmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1711
/* Recycle an RX skb that will not be passed up the stack: give the
 * buffer at ring slot @cons back to the hardware at producer slot
 * @prod, copying the DMA mapping and buffer descriptor address over.
 * When cons == prod the buffer stays in place and only the producer
 * byte sequence is advanced.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the header area back to the device; the CPU only peeked
	 * at the first RX_COPY_THRESH bytes.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1741
/* NAPI receive processing: consume up to @budget packets from the RX
 * ring.  For each packet the l2_fhdr status is checked for errors;
 * small packets (<= RX_COPY_THRESH with jumbo MTU) are copied into a
 * fresh skb so the large buffer can be recycled, otherwise the buffer
 * is unmapped and replaced.  Finally the producer index/byte-sequence
 * mailboxes are updated.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* Consumer indexes skip the last entry of each ring page. */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header area is synced for the CPU
		 * here; full-size packets are unmapped below.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the 4-byte frame CRC from the reported length. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer available (or errored
			 * frame): recycle and drop.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames unless they are VLAN tagged
		 * (ethertype 0x8100).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Flush the mailbox writes before interrupts are re-enabled. */
	mmiowb();

	return rx_pkt;

}
1891
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the actual RX/TX work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1914
/* INTx ISR: verify the interrupt is really ours (the line may be
 * shared), mask further interrupts, and schedule the NAPI poll.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1944
Michael Chanf4e418f2005-11-04 08:53:48 -08001945static inline int
1946bnx2_has_work(struct bnx2 *bp)
1947{
1948 struct status_block *sblk = bp->status_blk;
1949
1950 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1951 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1952 return 1;
1953
1954 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1955 bp->link_up)
1956 return 1;
1957
1958 return 0;
1959}
1960
/* NAPI poll routine (old *budget-style interface).
 *
 * Services link attentions, TX completions, and up to *budget RX
 * packets.  Returns 0 (and re-enables interrupts) when all work is
 * done, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention: the attn bit differs from its ack'ed copy. */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device's per-poll quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Snapshot the status index BEFORE re-checking for work, so a
	 * late update re-raises an interrupt rather than being lost.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: a single ack with the index re-enables. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack with MASK_INT first, then unmask with a
		 * second write carrying the same index.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* More work pending: stay on the poll list. */
	return 1;
}
2022
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the EMAC RX mode and RPM sort registers to reflect the
 * netdevice's promiscuous / allmulti / multicast-list configuration.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promisc/VLAN-keep cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a VLAN group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit
		 * position (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when the value changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort rules: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2097
Michael Chanfba9fe92006-06-12 22:21:25 -07002098#define FW_BUF_SIZE 0x8000
2099
2100static int
2101bnx2_gunzip_init(struct bnx2 *bp)
2102{
2103 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2104 goto gunzip_nomem1;
2105
2106 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2107 goto gunzip_nomem2;
2108
2109 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2110 if (bp->strm->workspace == NULL)
2111 goto gunzip_nomem3;
2112
2113 return 0;
2114
2115gunzip_nomem3:
2116 kfree(bp->strm);
2117 bp->strm = NULL;
2118
2119gunzip_nomem2:
2120 vfree(bp->gunzip_buf);
2121 bp->gunzip_buf = NULL;
2122
2123gunzip_nomem1:
2124 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2125 "uncompression.\n", bp->dev->name);
2126 return -ENOMEM;
2127}
2128
2129static void
2130bnx2_gunzip_end(struct bnx2 *bp)
2131{
2132 kfree(bp->strm->workspace);
2133
2134 kfree(bp->strm);
2135 bp->strm = NULL;
2136
2137 if (bp->gunzip_buf) {
2138 vfree(bp->gunzip_buf);
2139 bp->gunzip_buf = NULL;
2140 }
2141}
2142
2143static int
2144bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2145{
2146 int n, rc;
2147
2148 /* check gzip header */
2149 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2150 return -EINVAL;
2151
2152 n = 10;
2153
2154#define FNAME 0x8
2155 if (zbuf[3] & FNAME)
2156 while ((zbuf[n++] != 0) && (n < len));
2157
2158 bp->strm->next_in = zbuf + n;
2159 bp->strm->avail_in = len - n;
2160 bp->strm->next_out = bp->gunzip_buf;
2161 bp->strm->avail_out = FW_BUF_SIZE;
2162
2163 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2164 if (rc != Z_OK)
2165 return rc;
2166
2167 rc = zlib_inflate(bp->strm, Z_FINISH);
2168
2169 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2170 *outbuf = bp->gunzip_buf;
2171
2172 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2173 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2174 bp->dev->name, bp->strm->msg);
2175
2176 zlib_inflateEnd(bp->strm);
2177
2178 if (rc == Z_STREAM_END)
2179 return 0;
2180
2181 return rc;
2182}
2183
/* Load an RV2P processor firmware image into the chip.
 *
 * @rv2p_code:     decompressed instruction stream (pairs of 32-bit words)
 * @rv2p_code_len: image length in bytes (8 bytes per instruction)
 * @rv2p_proc:     RV2P_PROC1 or RV2P_PROC2, selecting the target engine
 *
 * The processor is left in reset; it is un-stalled later by the caller's
 * initialization sequence.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each 8-byte instruction is written as a high/low word pair,
	 * then committed to instruction RAM slot i/8 via the ADDR_CMD
	 * register of the selected processor.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2216
/* Load a firmware image into one of the on-chip MIPS CPUs.
 *
 * Sequence: halt the CPU, copy each firmware section (text, data,
 * sbss, bss, rodata) into the CPU's scratchpad through indirect
 * register writes, set the program counter, then release the halt.
 *
 * Section addresses in @fw are CPU (MIPS view) addresses; they are
 * translated to scratchpad offsets via cpu_reg->mips_view_base.
 */
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		/* NOTE(review): only the text section is passed through
		 * cpu_to_le32(); data/sbss/bss/rodata are written raw.
		 * Presumably the text array alone is stored pre-swapped —
		 * confirm against the generated bnx2_fw.h tables.
		 */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
2290
/* Decompress and load firmware into all on-chip processors: the two
 * RV2P engines plus the RXP, TXP, TPAT and COM MIPS CPUs.
 *
 * Each compressed image is inflated into the shared gunzip scratch
 * buffer (set up here, torn down on exit) and handed to
 * load_rv2p_fw() / load_cpu_fw().  Returns 0 on success or a negative
 * errno / zlib error from the decompression helpers.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is compressed; inflate it here. */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Fall through on success: teardown is shared with the error path. */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2530
/* Transition the device between PCI power states.
 *
 * @state: PCI_D0 (wake the chip, clear WoL receive config) or
 *         PCI_D3hot (optionally arm Wake-on-LAN, notify firmware,
 *         then drop into D3).  Any other state returns -EINVAL.
 *
 * After the D3hot path completes, no register access is allowed until
 * the device is brought back to D0.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force D0 and clear any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear magic-packet mode left over from a WoL suspend. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WoL
			 * link, then restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rules: clear, load, then enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (pmcsr |= 3) when WoL is
		 * armed; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2657
2658static int
2659bnx2_acquire_nvram_lock(struct bnx2 *bp)
2660{
2661 u32 val;
2662 int j;
2663
2664 /* Request access to the flash interface. */
2665 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2666 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2667 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2668 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2669 break;
2670
2671 udelay(5);
2672 }
2673
2674 if (j >= NVRAM_TIMEOUT_COUNT)
2675 return -EBUSY;
2676
2677 return 0;
2678}
2679
2680static int
2681bnx2_release_nvram_lock(struct bnx2 *bp)
2682{
2683 int j;
2684 u32 val;
2685
2686 /* Relinquish nvram interface. */
2687 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2688
2689 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2690 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2691 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2692 break;
2693
2694 udelay(5);
2695 }
2696
2697 if (j >= NVRAM_TIMEOUT_COUNT)
2698 return -EBUSY;
2699
2700 return 0;
2701}
2702
2703
/* Enable NVRAM writes.
 *
 * Sets the chip-level write-enable bit; for non-buffered flash parts
 * a WREN command must additionally be issued to the flash device and
 * polled to completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the flash WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2732
2733static void
2734bnx2_disable_nvram_write(struct bnx2 *bp)
2735{
2736 u32 val;
2737
2738 val = REG_RD(bp, BNX2_MISC_CFG);
2739 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2740}
2741
2742
2743static void
2744bnx2_enable_nvram_access(struct bnx2 *bp)
2745{
2746 u32 val;
2747
2748 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2749 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002750 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002751 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2752}
2753
2754static void
2755bnx2_disable_nvram_access(struct bnx2 *bp)
2756{
2757 u32 val;
2758
2759 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2760 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002761 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002762 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2763 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2764}
2765
/* Erase one flash page at @offset.
 *
 * Buffered flash parts need no explicit erase, so this is a no-op for
 * them.  Returns 0 on success (or no-op), -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2805
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes).
 *
 * @cmd_flags carries FIRST/LAST framing bits for multi-word bursts.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	/* Buffered parts address by (page << page_bits) + in-page byte. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Flash data is big-endian; convert so the byte
			 * copy below yields bytes in flash order.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2851
2852
/* Write one 32-bit word (4 bytes at @val) to NVRAM at @offset.
 *
 * @cmd_flags carries FIRST/LAST framing bits for multi-word bursts.
 * Returns 0 on success, -EBUSY on timeout.  Caller must have enabled
 * writes via bnx2_enable_nvram_write().
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Flash data is stored big-endian; convert before writing. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2896
/* Identify the attached flash/EEPROM part and set up bp->flash_info
 * and bp->flash_size.
 *
 * If the flash interface has not yet been reconfigured (NVM_CFG1 bit
 * 30 clear), the matching entry's timing/config registers are
 * programmed under the NVRAM lock.  Returns 0 on success, -ENODEV if
 * no table entry matches, or the lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		/* Match on the backup-strap bits of config1 only. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size advertised in shared memory; fall back to the
	 * table entry's total size when firmware reports none.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2974
2975static int
2976bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2977 int buf_size)
2978{
2979 int rc = 0;
2980 u32 cmd_flags, offset32, len32, extra;
2981
2982 if (buf_size == 0)
2983 return 0;
2984
2985 /* Request access to the flash interface. */
2986 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2987 return rc;
2988
2989 /* Enable access to flash interface */
2990 bnx2_enable_nvram_access(bp);
2991
2992 len32 = buf_size;
2993 offset32 = offset;
2994 extra = 0;
2995
2996 cmd_flags = 0;
2997
2998 if (offset32 & 3) {
2999 u8 buf[4];
3000 u32 pre_len;
3001
3002 offset32 &= ~3;
3003 pre_len = 4 - (offset & 3);
3004
3005 if (pre_len >= len32) {
3006 pre_len = len32;
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3008 BNX2_NVM_COMMAND_LAST;
3009 }
3010 else {
3011 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3012 }
3013
3014 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3015
3016 if (rc)
3017 return rc;
3018
3019 memcpy(ret_buf, buf + (offset & 3), pre_len);
3020
3021 offset32 += 4;
3022 ret_buf += pre_len;
3023 len32 -= pre_len;
3024 }
3025 if (len32 & 3) {
3026 extra = 4 - (len32 & 3);
3027 len32 = (len32 + 4) & ~3;
3028 }
3029
3030 if (len32 == 4) {
3031 u8 buf[4];
3032
3033 if (cmd_flags)
3034 cmd_flags = BNX2_NVM_COMMAND_LAST;
3035 else
3036 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3037 BNX2_NVM_COMMAND_LAST;
3038
3039 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3040
3041 memcpy(ret_buf, buf, 4 - extra);
3042 }
3043 else if (len32 > 0) {
3044 u8 buf[4];
3045
3046 /* Read the first word. */
3047 if (cmd_flags)
3048 cmd_flags = 0;
3049 else
3050 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3051
3052 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3053
3054 /* Advance to the next dword. */
3055 offset32 += 4;
3056 ret_buf += 4;
3057 len32 -= 4;
3058
3059 while (len32 > 4 && rc == 0) {
3060 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3061
3062 /* Advance to the next dword. */
3063 offset32 += 4;
3064 ret_buf += 4;
3065 len32 -= 4;
3066 }
3067
3068 if (rc)
3069 return rc;
3070
3071 cmd_flags = BNX2_NVM_COMMAND_LAST;
3072 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3073
3074 memcpy(ret_buf, buf, 4 - extra);
3075 }
3076
3077 /* Disable access to flash interface */
3078 bnx2_disable_nvram_access(bp);
3079
3080 bnx2_release_nvram_lock(bp);
3081
3082 return rc;
3083}
3084
3085static int
3086bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3087 int buf_size)
3088{
3089 u32 written, offset32, len32;
Michael Chanae181bc2006-05-22 16:39:20 -07003090 u8 *buf, start[4], end[4], *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003091 int rc = 0;
3092 int align_start, align_end;
3093
3094 buf = data_buf;
3095 offset32 = offset;
3096 len32 = buf_size;
3097 align_start = align_end = 0;
3098
3099 if ((align_start = (offset32 & 3))) {
3100 offset32 &= ~3;
3101 len32 += align_start;
3102 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3103 return rc;
3104 }
3105
3106 if (len32 & 3) {
3107 if ((len32 > 4) || !align_start) {
3108 align_end = 4 - (len32 & 3);
3109 len32 += align_end;
3110 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3111 end, 4))) {
3112 return rc;
3113 }
3114 }
3115 }
3116
3117 if (align_start || align_end) {
3118 buf = kmalloc(len32, GFP_KERNEL);
3119 if (buf == 0)
3120 return -ENOMEM;
3121 if (align_start) {
3122 memcpy(buf, start, 4);
3123 }
3124 if (align_end) {
3125 memcpy(buf + len32 - 4, end, 4);
3126 }
3127 memcpy(buf + align_start, data_buf, buf_size);
3128 }
3129
Michael Chanae181bc2006-05-22 16:39:20 -07003130 if (bp->flash_info->buffered == 0) {
3131 flash_buffer = kmalloc(264, GFP_KERNEL);
3132 if (flash_buffer == NULL) {
3133 rc = -ENOMEM;
3134 goto nvram_write_end;
3135 }
3136 }
3137
Michael Chanb6016b72005-05-26 13:03:09 -07003138 written = 0;
3139 while ((written < len32) && (rc == 0)) {
3140 u32 page_start, page_end, data_start, data_end;
3141 u32 addr, cmd_flags;
3142 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003143
3144 /* Find the page_start addr */
3145 page_start = offset32 + written;
3146 page_start -= (page_start % bp->flash_info->page_size);
3147 /* Find the page_end addr */
3148 page_end = page_start + bp->flash_info->page_size;
3149 /* Find the data_start addr */
3150 data_start = (written == 0) ? offset32 : page_start;
3151 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003152 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003153 (offset32 + len32) : page_end;
3154
3155 /* Request access to the flash interface. */
3156 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3157 goto nvram_write_end;
3158
3159 /* Enable access to flash interface */
3160 bnx2_enable_nvram_access(bp);
3161
3162 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3163 if (bp->flash_info->buffered == 0) {
3164 int j;
3165
3166 /* Read the whole page into the buffer
3167 * (non-buffer flash only) */
3168 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3169 if (j == (bp->flash_info->page_size - 4)) {
3170 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3171 }
3172 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003173 page_start + j,
3174 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003175 cmd_flags);
3176
3177 if (rc)
3178 goto nvram_write_end;
3179
3180 cmd_flags = 0;
3181 }
3182 }
3183
3184 /* Enable writes to flash interface (unlock write-protect) */
3185 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3186 goto nvram_write_end;
3187
3188 /* Erase the page */
3189 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3190 goto nvram_write_end;
3191
3192 /* Re-enable the write again for the actual write */
3193 bnx2_enable_nvram_write(bp);
3194
3195 /* Loop to write back the buffer data from page_start to
3196 * data_start */
3197 i = 0;
3198 if (bp->flash_info->buffered == 0) {
3199 for (addr = page_start; addr < data_start;
3200 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003201
Michael Chanb6016b72005-05-26 13:03:09 -07003202 rc = bnx2_nvram_write_dword(bp, addr,
3203 &flash_buffer[i], cmd_flags);
3204
3205 if (rc != 0)
3206 goto nvram_write_end;
3207
3208 cmd_flags = 0;
3209 }
3210 }
3211
3212 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003213 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003214 if ((addr == page_end - 4) ||
3215 ((bp->flash_info->buffered) &&
3216 (addr == data_end - 4))) {
3217
3218 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3219 }
3220 rc = bnx2_nvram_write_dword(bp, addr, buf,
3221 cmd_flags);
3222
3223 if (rc != 0)
3224 goto nvram_write_end;
3225
3226 cmd_flags = 0;
3227 buf += 4;
3228 }
3229
3230 /* Loop to write back the buffer data from data_end
3231 * to page_end */
3232 if (bp->flash_info->buffered == 0) {
3233 for (addr = data_end; addr < page_end;
3234 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003235
Michael Chanb6016b72005-05-26 13:03:09 -07003236 if (addr == page_end-4) {
3237 cmd_flags = BNX2_NVM_COMMAND_LAST;
3238 }
3239 rc = bnx2_nvram_write_dword(bp, addr,
3240 &flash_buffer[i], cmd_flags);
3241
3242 if (rc != 0)
3243 goto nvram_write_end;
3244
3245 cmd_flags = 0;
3246 }
3247 }
3248
3249 /* Disable writes to flash interface (lock write-protect) */
3250 bnx2_disable_nvram_write(bp);
3251
3252 /* Disable access to flash interface */
3253 bnx2_disable_nvram_access(bp);
3254 bnx2_release_nvram_lock(bp);
3255
3256 /* Increment written */
3257 written += data_end - data_start;
3258 }
3259
3260nvram_write_end:
Michael Chanae181bc2006-05-22 16:39:20 -07003261 if (bp->flash_info->buffered == 0)
3262 kfree(flash_buffer);
3263
Michael Chanb6016b72005-05-26 13:03:09 -07003264 if (align_start || align_end)
3265 kfree(buf);
3266 return rc;
3267}
3268
/* Soft-reset the chip core and wait for the restarted bootcode firmware
 * to report that it is ready again.
 *
 * reset_code is the BNX2_DRV_MSG_CODE_* reason that is passed to the
 * firmware via bnx2_fw_sync() both before (WAIT0) and after (WAIT1)
 * the reset.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* Extra settling delay for the early 5706 steppings
	 * (presumably a silicon errata workaround -- not documented here). */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Both request and busy bits must have cleared for the reset
	 * to be considered complete. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3347
/* Program the chip after a reset: DMA/byte-swap configuration, on-chip
 * CPU firmware (bnx2_init_cpus), NVRAM detection, MAC address, memory
 * queue windows, host-coalescing parameters, and the RX filter.  Ends
 * by telling the bootcode (bnx2_fw_sync WAIT2) that driver init is done
 * and enabling the remaining chip blocks.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration; control-path swap bits are only
	 * needed on big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* Undocumented DMA_CONFIG bits -- meaning not visible here. */
	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0: restrict the TX DMA engine to a single channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the
	 * PCI-X command register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	/* Load and start the on-chip RV2P/processor firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Memory queue: kernel bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P block the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff algorithm from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU. Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: interrupt value in the high
	 * halfword, non-interrupt value in the low halfword. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 cannot use the timer modes; only collect stats. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware reports ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the bootcode that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining chip blocks and flush with a read-back. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3509
3510
/* Initialize the TX ring: reset the software producer/consumer state,
 * point the last BD back at the ring base (the chain link that makes
 * the single-page ring circular), and program the TX context in the
 * chip with the ring's DMA address.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* Threshold for restarting the queue -- presumably checked by
	 * the TX completion path; half the ring.  TODO confirm caller. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The BD one past the last usable descriptor holds the chain
	 * pointer back to the start of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Program the TX L2 context: ring type/size... */
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	/* ...command type (the meaning of the 8 << 16 field is not
	 * visible here -- likely a BD count or watermark)... */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* ...and the 64-bit DMA address of the BD ring. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3543
/* Initialize the RX ring(s): size the receive buffers from the current
 * MTU, chain the per-page BD rings into a circle, program the RX
 * context with the first page's DMA address, pre-fill the ring with
 * skbs, and publish the initial producer index/bseq to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Fill the usable BDs of page i with the buffer length
		 * and start/end flags. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page links to the next page;
		 * the final page links back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX L2 context: BD-chain ring type/size. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* DMA address of the first BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive skbs; if an allocation fails the ring
	 * simply starts with fewer buffers. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3603
3604static void
Michael Chan13daffa2006-03-20 17:49:20 -08003605bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3606{
3607 u32 num_rings, max;
3608
3609 bp->rx_ring_size = size;
3610 num_rings = 1;
3611 while (size > MAX_RX_DESC_CNT) {
3612 size -= MAX_RX_DESC_CNT;
3613 num_rings++;
3614 }
3615 /* round to next power of 2 */
3616 max = MAX_RX_RINGS;
3617 while ((max & num_rings) == 0)
3618 max >>= 1;
3619
3620 if (num_rings != max)
3621 max <<= 1;
3622
3623 bp->rx_max_ring = max;
3624 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3625}
3626
/* Unmap and free every skb still attached to the TX ring.  Each packet
 * occupies one head BD (mapped with pci_map_single) followed by one BD
 * per page fragment (mapped with pci_map_page); the index arithmetic
 * skips over all of them in one step.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: advance one entry. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) part of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment, which lives in the BD following
		 * the head (i + j + 1). */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head BD and all fragment BDs. */
		i += j + 1;
	}

}
3663
3664static void
3665bnx2_free_rx_skbs(struct bnx2 *bp)
3666{
3667 int i;
3668
3669 if (bp->rx_buf_ring == NULL)
3670 return;
3671
Michael Chan13daffa2006-03-20 17:49:20 -08003672 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003673 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3674 struct sk_buff *skb = rx_buf->skb;
3675
Michael Chan05d0f1c2005-11-04 08:53:48 -08003676 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003677 continue;
3678
3679 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3680 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3681
3682 rx_buf->skb = NULL;
3683
Michael Chan745720e2006-06-29 12:37:41 -07003684 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003685 }
3686}
3687
/* Release every socket buffer still attached to either ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3694
3695static int
3696bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3697{
3698 int rc;
3699
3700 rc = bnx2_reset_chip(bp, reset_code);
3701 bnx2_free_skbs(bp);
3702 if (rc)
3703 return rc;
3704
Michael Chanfba9fe92006-06-12 22:21:25 -07003705 if ((rc = bnx2_init_chip(bp)) != 0)
3706 return rc;
3707
Michael Chanb6016b72005-05-26 13:03:09 -07003708 bnx2_init_tx_ring(bp);
3709 bnx2_init_rx_ring(bp);
3710 return 0;
3711}
3712
3713static int
3714bnx2_init_nic(struct bnx2 *bp)
3715{
3716 int rc;
3717
3718 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3719 return rc;
3720
Michael Chan80be4432006-11-19 14:07:28 -08003721 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003722 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003723 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003724 bnx2_set_link(bp);
3725 return 0;
3726}
3727
/* Self-test of chip registers.  For each table entry: save the current
 * value, write all-zeros and all-ones, and verify that the read/write
 * bits (rw_mask) toggle while the read-only bits (ro_mask) keep their
 * saved value.  The original value is always restored, even on failure.
 * Returns 0 on success or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;
		u16 flags;	/* currently always 0 -- purpose not visible here */
		u32 rw_mask;	/* bits that must be writable */
		u32 ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: writable bits must read back 0, read-only
		 * bits must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write ones: writable bits must all read back 1,
		 * read-only bits must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3890
3891static int
3892bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3893{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003894 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003895 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3896 int i;
3897
3898 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3899 u32 offset;
3900
3901 for (offset = 0; offset < size; offset += 4) {
3902
3903 REG_WR_IND(bp, start + offset, test_pattern[i]);
3904
3905 if (REG_RD_IND(bp, start + offset) !=
3906 test_pattern[i]) {
3907 return -ENODEV;
3908 }
3909 }
3910 }
3911 return 0;
3912}
3913
3914static int
3915bnx2_test_memory(struct bnx2 *bp)
3916{
3917 int ret = 0;
3918 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003919 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003920 u32 offset;
3921 u32 len;
3922 } mem_tbl[] = {
3923 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003924 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003925 { 0xe0000, 0x4000 },
3926 { 0x120000, 0x4000 },
3927 { 0x1a0000, 0x4000 },
3928 { 0x160000, 0x4000 },
3929 { 0xffffffff, 0 },
3930 };
3931
3932 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3933 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3934 mem_tbl[i].len)) != 0) {
3935 return ret;
3936 }
3937 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003938
Michael Chanb6016b72005-05-26 13:03:09 -07003939 return ret;
3940}
3941
/* Loopback modes accepted by bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1
3944
/* Transmit one self-addressed frame with the chip in MAC or PHY loopback
 * and verify it comes back intact on the receive ring.  Returns 0 on
 * success, -EINVAL for an unknown mode, -ENOMEM if the test skb cannot
 * be allocated, -ENODEV if the frame is lost or corrupted.
 * bp->loopback is restored to 0 before returning.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size test frame: our own MAC as destination, then a
	 * recognizable byte ramp in the payload for later verification.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the chip as a single tx descriptor. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index + byte sequence). */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update so the looped-back completion
	 * is visible before we inspect the indices.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have been consumed... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, minus the 4-byte CRC counted by the chip. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte ramp survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4063
/* Bit flags returned by bnx2_test_loopback(). */
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)
4068
4069static int
4070bnx2_test_loopback(struct bnx2 *bp)
4071{
4072 int rc = 0;
4073
4074 if (!netif_running(bp->dev))
4075 return BNX2_LOOPBACK_FAILED;
4076
4077 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4078 spin_lock_bh(&bp->phy_lock);
4079 bnx2_init_phy(bp);
4080 spin_unlock_bh(&bp->phy_lock);
4081 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4082 rc |= BNX2_MAC_LOOPBACK_FAILED;
4083 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4084 rc |= BNX2_PHY_LOOPBACK_FAILED;
4085 return rc;
4086}
4087
/* Size in bytes of each NVRAM region checked by bnx2_test_nvram(), and
 * the CRC32 residual expected when a region is summed including its
 * stored CRC.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
4090
4091static int
4092bnx2_test_nvram(struct bnx2 *bp)
4093{
4094 u32 buf[NVRAM_SIZE / 4];
4095 u8 *data = (u8 *) buf;
4096 int rc = 0;
4097 u32 magic, csum;
4098
4099 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4100 goto test_nvram_done;
4101
4102 magic = be32_to_cpu(buf[0]);
4103 if (magic != 0x669955aa) {
4104 rc = -ENODEV;
4105 goto test_nvram_done;
4106 }
4107
4108 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4109 goto test_nvram_done;
4110
4111 csum = ether_crc_le(0x100, data);
4112 if (csum != CRC32_RESIDUAL) {
4113 rc = -ENODEV;
4114 goto test_nvram_done;
4115 }
4116
4117 csum = ether_crc_le(0x100, data + 0x100);
4118 if (csum != CRC32_RESIDUAL) {
4119 rc = -ENODEV;
4120 }
4121
4122test_nvram_done:
4123 return rc;
4124}
4125
4126static int
4127bnx2_test_link(struct bnx2 *bp)
4128{
4129 u32 bmsr;
4130
Michael Chanc770a652005-08-25 15:38:39 -07004131 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004132 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4133 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004134 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004135
Michael Chanb6016b72005-05-26 13:03:09 -07004136 if (bmsr & BMSR_LSTATUS) {
4137 return 0;
4138 }
4139 return -ENODEV;
4140}
4141
/* Verify the chip can deliver an interrupt: force an immediate status
 * block update (COAL_NOW) and poll up to ~100ms for the interrupt-ack
 * index to advance.  Used by bnx2_open() to detect broken MSI delivery.
 * Returns 0 if an interrupt was observed, -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	/* Poll in 10ms slices for the ack index to move. */
	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4171
/* Per-tick 5706 SerDes link handling, called from bnx2_timer().  If
 * autoneg has not brought the link up but a signal is detected with no
 * config exchange from the partner, force 1Gb full duplex (parallel
 * detect).  Once a config exchange later appears, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg restart is in flight; count down and wait. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific register access sequence —
			 * presumably selects shadow/expansion registers
			 * for signal-detect and config status; see the
			 * PHY datasheet to confirm.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not negotiating: force the
				 * link instead.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link was forced via parallel detect; if the partner now
		 * sends CONFIG, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4226
/* Per-tick 5708 SerDes link handling, called from bnx2_timer().  On a
 * 2.5G-capable port whose link stays down under autoneg, alternate each
 * interval between forcing 2.5Gb full duplex and re-enabling autoneg
 * until the link comes up.  No-op for non-2.5G-capable ports.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A recent autoneg restart is still pending; wait it out. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg isn't linking: try forcing 2.5Gb. */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't link either: go back to
			 * autoneg and give it two intervals.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4261
/* Periodic driver timer: writes the driver-alive pulse sequence to the
 * firmware mailbox, samples the firmware rx-drop counter into the stats
 * block, and runs the per-chip SerDes state machines.  Rearms itself at
 * bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip this tick while interrupts are gated (e.g. reset path),
	 * but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4289
/* Called with rtnl_lock.
 * net_device open hook: power the chip up, allocate ring memory, attach
 * the interrupt (MSI when the chip stepping allows and the module
 * parameter permits, else shared INTx), initialize the NIC, verify MSI
 * actually delivers interrupts (falling back to INTx if not), and start
 * the transmit queue.  Unwinds all acquired resources on any failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is avoided on 5706 A0/A1 steppings and when disabled via
	 * the disable_msi module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Full unwind: irq, optional MSI, buffers, ring memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
				" using MSI, switching to INTx mode. Please"
				" report this failure to the PCI maintainer"
				" and include system chipset information.\n",
				bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init the NIC and retry with a shared INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
						IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4385
/* Workqueue handler scheduled from bnx2_tx_timeout(): stop traffic,
 * re-initialize the NIC, and restart.  The in_reset_task flag lets
 * bnx2_close() wait for this work to finish instead of flushing the
 * workqueue (which could deadlock on rtnl_lock — see bnx2_close()).
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Gate interrupt handling around the restart; presumably released
	 * by the bnx2_netif_start() path — confirm against its definition.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4403
/* net_device tx_timeout hook: defer the actual reset to process context
 * via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4412
#ifdef BCM_VLAN
/* Called with rtnl_lock.
 * VLAN acceleration hook: install the new vlan_group and refresh the
 * chip's rx filtering while traffic is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock.
 * Remove a VLAN id: clear the per-vid device pointer and refresh the
 * rx filtering, again with traffic quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
4443
Herbert Xu932ff272006-06-09 12:20:56 -07004444/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004445 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4446 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004447 */
4448static int
4449bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4450{
Michael Chan972ec0d2006-01-23 16:12:43 -08004451 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004452 dma_addr_t mapping;
4453 struct tx_bd *txbd;
4454 struct sw_bd *tx_buf;
4455 u32 len, vlan_tag_flags, last_frag, mss;
4456 u16 prod, ring_prod;
4457 int i;
4458
Michael Chane89bbf12005-08-25 15:36:58 -07004459 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004460 netif_stop_queue(dev);
4461 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4462 dev->name);
4463
4464 return NETDEV_TX_BUSY;
4465 }
4466 len = skb_headlen(skb);
4467 prod = bp->tx_prod;
4468 ring_prod = TX_RING_IDX(prod);
4469
4470 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004471 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004472 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4473 }
4474
4475 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4476 vlan_tag_flags |=
4477 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4478 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004479#ifdef BCM_TSO
Herbert Xu79671682006-06-22 02:40:14 -07004480 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004481 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4482 u32 tcp_opt_len, ip_tcp_len;
4483
4484 if (skb_header_cloned(skb) &&
4485 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4486 dev_kfree_skb(skb);
4487 return NETDEV_TX_OK;
4488 }
4489
4490 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4491 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4492
4493 tcp_opt_len = 0;
4494 if (skb->h.th->doff > 5) {
4495 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4496 }
4497 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4498
4499 skb->nh.iph->check = 0;
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07004500 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb6016b72005-05-26 13:03:09 -07004501 skb->h.th->check =
4502 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4503 skb->nh.iph->daddr,
4504 0, IPPROTO_TCP, 0);
4505
4506 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4507 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4508 (tcp_opt_len >> 2)) << 8;
4509 }
4510 }
4511 else
4512#endif
4513 {
4514 mss = 0;
4515 }
4516
4517 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004518
Michael Chanb6016b72005-05-26 13:03:09 -07004519 tx_buf = &bp->tx_buf_ring[ring_prod];
4520 tx_buf->skb = skb;
4521 pci_unmap_addr_set(tx_buf, mapping, mapping);
4522
4523 txbd = &bp->tx_desc_ring[ring_prod];
4524
4525 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4526 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4527 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4528 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4529
4530 last_frag = skb_shinfo(skb)->nr_frags;
4531
4532 for (i = 0; i < last_frag; i++) {
4533 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4534
4535 prod = NEXT_TX_BD(prod);
4536 ring_prod = TX_RING_IDX(prod);
4537 txbd = &bp->tx_desc_ring[ring_prod];
4538
4539 len = frag->size;
4540 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4541 len, PCI_DMA_TODEVICE);
4542 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4543 mapping, mapping);
4544
4545 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4546 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4547 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4548 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4549
4550 }
4551 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4552
4553 prod = NEXT_TX_BD(prod);
4554 bp->tx_prod_bseq += skb->len;
4555
Michael Chanb6016b72005-05-26 13:03:09 -07004556 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4557 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4558
4559 mmiowb();
4560
4561 bp->tx_prod = prod;
4562 dev->trans_start = jiffies;
4563
Michael Chane89bbf12005-08-25 15:36:58 -07004564 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004565 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004566 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004567 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004568 }
4569
4570 return NETDEV_TX_OK;
4571}
4572
/* Called with rtnl_lock.
 * net_device stop hook: quiesce traffic, tell the firmware how to park
 * the chip (based on the wake-on-LAN configuration), release the irq
 * and all memory, then drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code matching the WoL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4608
/* Read a hardware statistics counter kept as a hi/lo pair of 32-bit
 * words.  The 64-bit form is only meaningful where unsigned long is
 * 64 bits wide; the expansion is fully parenthesized so the macro is
 * safe inside larger expressions.
 */
#define GET_NET_STATS64(ctr) \
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low word fits in unsigned long. */
#define GET_NET_STATS32(ctr) \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4621
/* net_device get_stats hook: fold the chip's DMA'd statistics block
 * into bp->net_stats.  Hardware keeps wide counters as hi/lo 32-bit
 * pairs, read via GET_NET_STATS (full 64 bits only on 64-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats memory may not exist yet (device never opened). */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* Aggregate rx error count from the categories filled above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are reported as 0 on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops (sampled in bnx2_timer()). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4697
4698/* All ethtool functions called with rtnl_lock */
4699
4700static int
4701bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4702{
Michael Chan972ec0d2006-01-23 16:12:43 -08004703 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004704
4705 cmd->supported = SUPPORTED_Autoneg;
4706 if (bp->phy_flags & PHY_SERDES_FLAG) {
4707 cmd->supported |= SUPPORTED_1000baseT_Full |
4708 SUPPORTED_FIBRE;
4709
4710 cmd->port = PORT_FIBRE;
4711 }
4712 else {
4713 cmd->supported |= SUPPORTED_10baseT_Half |
4714 SUPPORTED_10baseT_Full |
4715 SUPPORTED_100baseT_Half |
4716 SUPPORTED_100baseT_Full |
4717 SUPPORTED_1000baseT_Full |
4718 SUPPORTED_TP;
4719
4720 cmd->port = PORT_TP;
4721 }
4722
4723 cmd->advertising = bp->advertising;
4724
4725 if (bp->autoneg & AUTONEG_SPEED) {
4726 cmd->autoneg = AUTONEG_ENABLE;
4727 }
4728 else {
4729 cmd->autoneg = AUTONEG_DISABLE;
4730 }
4731
4732 if (netif_carrier_ok(dev)) {
4733 cmd->speed = bp->line_speed;
4734 cmd->duplex = bp->duplex;
4735 }
4736 else {
4737 cmd->speed = -1;
4738 cmd->duplex = -1;
4739 }
4740
4741 cmd->transceiver = XCVR_INTERNAL;
4742 cmd->phy_address = bp->phy_addr;
4743
4744 return 0;
4745}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004746
Michael Chanb6016b72005-05-26 13:03:09 -07004747static int
4748bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4749{
Michael Chan972ec0d2006-01-23 16:12:43 -08004750 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004751 u8 autoneg = bp->autoneg;
4752 u8 req_duplex = bp->req_duplex;
4753 u16 req_line_speed = bp->req_line_speed;
4754 u32 advertising = bp->advertising;
4755
4756 if (cmd->autoneg == AUTONEG_ENABLE) {
4757 autoneg |= AUTONEG_SPEED;
4758
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004759 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07004760
4761 /* allow advertising 1 speed */
4762 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4763 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4764 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4765 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4766
4767 if (bp->phy_flags & PHY_SERDES_FLAG)
4768 return -EINVAL;
4769
4770 advertising = cmd->advertising;
4771
4772 }
4773 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4774 advertising = cmd->advertising;
4775 }
4776 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4777 return -EINVAL;
4778 }
4779 else {
4780 if (bp->phy_flags & PHY_SERDES_FLAG) {
4781 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4782 }
4783 else {
4784 advertising = ETHTOOL_ALL_COPPER_SPEED;
4785 }
4786 }
4787 advertising |= ADVERTISED_Autoneg;
4788 }
4789 else {
4790 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08004791 if ((cmd->speed != SPEED_1000 &&
4792 cmd->speed != SPEED_2500) ||
4793 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07004794 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08004795
4796 if (cmd->speed == SPEED_2500 &&
4797 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4798 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004799 }
4800 else if (cmd->speed == SPEED_1000) {
4801 return -EINVAL;
4802 }
4803 autoneg &= ~AUTONEG_SPEED;
4804 req_line_speed = cmd->speed;
4805 req_duplex = cmd->duplex;
4806 advertising = 0;
4807 }
4808
4809 bp->autoneg = autoneg;
4810 bp->advertising = advertising;
4811 bp->req_line_speed = req_line_speed;
4812 bp->req_duplex = req_duplex;
4813
Michael Chanc770a652005-08-25 15:38:39 -07004814 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004815
4816 bnx2_setup_phy(bp);
4817
Michael Chanc770a652005-08-25 15:38:39 -07004818 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004819
4820 return 0;
4821}
4822
4823static void
4824bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4825{
Michael Chan972ec0d2006-01-23 16:12:43 -08004826 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004827
4828 strcpy(info->driver, DRV_MODULE_NAME);
4829 strcpy(info->version, DRV_MODULE_VERSION);
4830 strcpy(info->bus_info, pci_name(bp->pdev));
4831 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4832 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4833 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004834 info->fw_version[1] = info->fw_version[3] = '.';
4835 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004836}
4837
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: tell the core how big a buffer to allocate. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4845
4846static void
4847bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4848{
4849 u32 *p = _p, i, offset;
4850 u8 *orig_p = _p;
4851 struct bnx2 *bp = netdev_priv(dev);
4852 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4853 0x0800, 0x0880, 0x0c00, 0x0c10,
4854 0x0c30, 0x0d08, 0x1000, 0x101c,
4855 0x1040, 0x1048, 0x1080, 0x10a4,
4856 0x1400, 0x1490, 0x1498, 0x14f0,
4857 0x1500, 0x155c, 0x1580, 0x15dc,
4858 0x1600, 0x1658, 0x1680, 0x16d8,
4859 0x1800, 0x1820, 0x1840, 0x1854,
4860 0x1880, 0x1894, 0x1900, 0x1984,
4861 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4862 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4863 0x2000, 0x2030, 0x23c0, 0x2400,
4864 0x2800, 0x2820, 0x2830, 0x2850,
4865 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4866 0x3c00, 0x3c94, 0x4000, 0x4010,
4867 0x4080, 0x4090, 0x43c0, 0x4458,
4868 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4869 0x4fc0, 0x5010, 0x53c0, 0x5444,
4870 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4871 0x5fc0, 0x6000, 0x6400, 0x6428,
4872 0x6800, 0x6848, 0x684c, 0x6860,
4873 0x6888, 0x6910, 0x8000 };
4874
4875 regs->version = 0;
4876
4877 memset(p, 0, BNX2_REGDUMP_LEN);
4878
4879 if (!netif_running(bp->dev))
4880 return;
4881
4882 i = 0;
4883 offset = reg_boundaries[0];
4884 p += offset;
4885 while (offset < BNX2_REGDUMP_LEN) {
4886 *p++ = REG_RD(bp, offset);
4887 offset += 4;
4888 if (offset == reg_boundaries[i + 1]) {
4889 offset = reg_boundaries[i + 2];
4890 p = (u32 *) (orig_p + offset);
4891 i += 2;
4892 }
4893 }
4894}
4895
Michael Chanb6016b72005-05-26 13:03:09 -07004896static void
4897bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4898{
Michael Chan972ec0d2006-01-23 16:12:43 -08004899 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004900
4901 if (bp->flags & NO_WOL_FLAG) {
4902 wol->supported = 0;
4903 wol->wolopts = 0;
4904 }
4905 else {
4906 wol->supported = WAKE_MAGIC;
4907 if (bp->wol)
4908 wol->wolopts = WAKE_MAGIC;
4909 else
4910 wol->wolopts = 0;
4911 }
4912 memset(&wol->sopass, 0, sizeof(wol->sopass));
4913}
4914
4915static int
4916bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4917{
Michael Chan972ec0d2006-01-23 16:12:43 -08004918 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004919
4920 if (wol->wolopts & ~WAKE_MAGIC)
4921 return -EINVAL;
4922
4923 if (wol->wolopts & WAKE_MAGIC) {
4924 if (bp->flags & NO_WOL_FLAG)
4925 return -EINVAL;
4926
4927 bp->wol = 1;
4928 }
4929 else {
4930 bp->wol = 0;
4931 }
4932 return 0;
4933}
4934
/* ethtool nway_reset handler (ethtool -r): restart autonegotiation.
 *
 * Returns -EINVAL if autoneg is not enabled for speed.  For SerDes
 * PHYs the link is first forced down (loopback) so the peer notices
 * the renegotiation; the phy_lock is dropped around the 20 ms sleep
 * because sleeping under a BH spinlock is not allowed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Must not sleep while holding phy_lock. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the timer that monitors SerDes autoneg completion. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4969
4970static int
4971bnx2_get_eeprom_len(struct net_device *dev)
4972{
Michael Chan972ec0d2006-01-23 16:12:43 -08004973 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004974
Michael Chan1122db72006-01-23 16:11:42 -08004975 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004976 return 0;
4977
Michael Chan1122db72006-01-23 16:11:42 -08004978 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004979}
4980
4981static int
4982bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4983 u8 *eebuf)
4984{
Michael Chan972ec0d2006-01-23 16:12:43 -08004985 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004986 int rc;
4987
John W. Linville1064e942005-11-10 12:58:24 -08004988 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004989
4990 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4991
4992 return rc;
4993}
4994
4995static int
4996bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4997 u8 *eebuf)
4998{
Michael Chan972ec0d2006-01-23 16:12:43 -08004999 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005000 int rc;
5001
John W. Linville1064e942005-11-10 12:58:24 -08005002 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005003
5004 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5005
5006 return rc;
5007}
5008
5009static int
5010bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5011{
Michael Chan972ec0d2006-01-23 16:12:43 -08005012 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005013
5014 memset(coal, 0, sizeof(struct ethtool_coalesce));
5015
5016 coal->rx_coalesce_usecs = bp->rx_ticks;
5017 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5018 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5019 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5020
5021 coal->tx_coalesce_usecs = bp->tx_ticks;
5022 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5023 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5024 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5025
5026 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5027
5028 return 0;
5029}
5030
/* ethtool set_coalesce handler: program new interrupt coalescing
 * parameters, clamping each to the hardware field widths (tick values
 * to 10 bits, frame-count trip points to 8 bits, the statistics
 * interval to its 0xffff00 mask).  If the interface is up the NIC is
 * re-initialized so the new values take effect immediately.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Stats interval: clamp then round down to the hardware mask. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		/* Restart the NIC so the chip picks up the new values. */
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5074
5075static void
5076bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5077{
Michael Chan972ec0d2006-01-23 16:12:43 -08005078 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005079
Michael Chan13daffa2006-03-20 17:49:20 -08005080 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005081 ering->rx_mini_max_pending = 0;
5082 ering->rx_jumbo_max_pending = 0;
5083
5084 ering->rx_pending = bp->rx_ring_size;
5085 ering->rx_mini_pending = 0;
5086 ering->rx_jumbo_pending = 0;
5087
5088 ering->tx_max_pending = MAX_TX_DESC_CNT;
5089 ering->tx_pending = bp->tx_ring_size;
5090}
5091
/* ethtool set_ringparam handler: resize the RX/TX rings.
 *
 * The TX ring must be strictly larger than MAX_SKB_FRAGS so a
 * maximally fragmented skb always fits.  When the interface is up,
 * the rings are torn down and rebuilt with the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its memory freed — the caller only sees
		 * the errno.  Looks intentional-but-fragile; confirm
		 * against later upstream handling before changing.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5125
5126static void
5127bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5128{
Michael Chan972ec0d2006-01-23 16:12:43 -08005129 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005130
5131 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5132 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5133 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5134}
5135
5136static int
5137bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5138{
Michael Chan972ec0d2006-01-23 16:12:43 -08005139 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005140
5141 bp->req_flow_ctrl = 0;
5142 if (epause->rx_pause)
5143 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5144 if (epause->tx_pause)
5145 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5146
5147 if (epause->autoneg) {
5148 bp->autoneg |= AUTONEG_FLOW_CTRL;
5149 }
5150 else {
5151 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5152 }
5153
Michael Chanc770a652005-08-25 15:38:39 -07005154 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005155
5156 bnx2_setup_phy(bp);
5157
Michael Chanc770a652005-08-25 15:38:39 -07005158 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005159
5160 return 0;
5161}
5162
5163static u32
5164bnx2_get_rx_csum(struct net_device *dev)
5165{
Michael Chan972ec0d2006-01-23 16:12:43 -08005166 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005167
5168 return bp->rx_csum;
5169}
5170
5171static int
5172bnx2_set_rx_csum(struct net_device *dev, u32 data)
5173{
Michael Chan972ec0d2006-01-23 16:12:43 -08005174 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005175
5176 bp->rx_csum = data;
5177 return 0;
5178}
5179
Michael Chanb11d6212006-06-29 12:31:21 -07005180static int
5181bnx2_set_tso(struct net_device *dev, u32 data)
5182{
5183 if (data)
5184 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5185 else
5186 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5187 return 0;
5188}
5189
/* Number of ethtool statistics exported; all four tables below must
 * stay exactly this long and in the same order.
 */
#define BNX2_NUM_STATS 46

/* ethtool statistic names, reported by bnx2_get_strings(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* Offset of a counter within the hardware statistics block, in u32
 * units (the block is read as an array of u32 by
 * bnx2_get_ethtool_stats()).
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-statistic offset into the statistics block; parallel to
 * bnx2_stats_str_arr above.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Width in bytes (8, 4, or 0 = skip) of each counter, per chip
 * revision; consumed by bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Number of ethtool self-tests; must match bnx2_tests_str_arr and the
 * buf[] slots filled in bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Self-test names, reported by bnx2_get_strings(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5325
5326static int
5327bnx2_self_test_count(struct net_device *dev)
5328{
5329 return BNX2_NUM_TESTS;
5330}
5331
/* ethtool self_test handler.
 *
 * buf[] slots (1 = failed): 0 registers, 1 memory, 2 loopback,
 * 3 nvram, 4 interrupt, 5 link — matching bnx2_tests_str_arr.
 *
 * The first three tests require the device offline: traffic is
 * stopped and the chip reset into diagnostic mode.  Afterwards the
 * NIC is re-initialized (or fully reset if the interface is down)
 * and we wait up to ~7 s for the link to return before the online
 * tests run.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5387
5388static void
5389bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5390{
5391 switch (stringset) {
5392 case ETH_SS_STATS:
5393 memcpy(buf, bnx2_stats_str_arr,
5394 sizeof(bnx2_stats_str_arr));
5395 break;
5396 case ETH_SS_TEST:
5397 memcpy(buf, bnx2_tests_str_arr,
5398 sizeof(bnx2_tests_str_arr));
5399 break;
5400 }
5401}
5402
5403static int
5404bnx2_get_stats_count(struct net_device *dev)
5405{
5406 return BNX2_NUM_STATS;
5407}
5408
/* ethtool get_ethtool_stats handler: copy counters out of the
 * DMA'd hardware statistics block into buf[].
 *
 * The per-chip length table says how wide each counter is: 0 means
 * the counter is skipped due to errata (reported as 0), 4 means a
 * single u32, otherwise two consecutive u32s (hi, lo) are combined
 * into a 64-bit value.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block may not be allocated yet (device never opened). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 revisions must skip two errata counters. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5449
/* ethtool phys_id handler (ethtool -p): blink the port LEDs to
 * identify the adapter.
 *
 * @data: number of seconds to blink; 0 defaults to 2.  The loop
 * toggles the LED override every 500 ms (so each second gives one
 * on/off cycle) and bails out early if the user interrupts the sleep.
 * The original MISC_CFG LED mode is restored on exit.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* Even half-cycle: LEDs off (override only). */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* Odd half-cycle: force all speed/traffic LEDs on. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Return LEDs to normal hardware control. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5483
/* ethtool operations table; TSO entries are compiled in only when the
 * kernel provides TSO support (BCM_TSO set at the top of the file).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5521
/* Called with rtnl_lock */
/* MII ioctl handler: read/write PHY registers on behalf of userspace
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  PHY access is serialized by
 * phy_lock; writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5563
5564/* Called with rtnl_lock */
5565static int
5566bnx2_change_mac_addr(struct net_device *dev, void *p)
5567{
5568 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005569 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005570
Michael Chan73eef4c2005-08-25 15:39:15 -07005571 if (!is_valid_ether_addr(addr->sa_data))
5572 return -EINVAL;
5573
Michael Chanb6016b72005-05-26 13:03:09 -07005574 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5575 if (netif_running(dev))
5576 bnx2_set_mac_addr(bp);
5577
5578 return 0;
5579}
5580
5581/* Called with rtnl_lock */
5582static int
5583bnx2_change_mtu(struct net_device *dev, int new_mtu)
5584{
Michael Chan972ec0d2006-01-23 16:12:43 -08005585 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005586
5587 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5588 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5589 return -EINVAL;
5590
5591 dev->mtu = new_mtu;
5592 if (netif_running(dev)) {
5593 bnx2_netif_stop(bp);
5594
5595 bnx2_init_nic(bp);
5596
5597 bnx2_netif_start(bp);
5598 }
5599 return 0;
5600}
5601
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the NIC with interrupts disabled (used by
 * netconsole/kgdboe when normal interrupt delivery is unavailable).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5613
5614static int __devinit
5615bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5616{
5617 struct bnx2 *bp;
5618 unsigned long mem_len;
5619 int rc;
5620 u32 reg;
5621
5622 SET_MODULE_OWNER(dev);
5623 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005624 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005625
5626 bp->flags = 0;
5627 bp->phy_flags = 0;
5628
5629 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5630 rc = pci_enable_device(pdev);
5631 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005632 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005633 goto err_out;
5634 }
5635
5636 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005637 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005638 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005639 rc = -ENODEV;
5640 goto err_out_disable;
5641 }
5642
5643 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5644 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005645 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005646 goto err_out_disable;
5647 }
5648
5649 pci_set_master(pdev);
5650
5651 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5652 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005653 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005654 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005655 rc = -EIO;
5656 goto err_out_release;
5657 }
5658
5659 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5660 if (bp->pcix_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005661 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005662 rc = -EIO;
5663 goto err_out_release;
5664 }
5665
5666 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5667 bp->flags |= USING_DAC_FLAG;
5668 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005669 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005670 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005671 rc = -EIO;
5672 goto err_out_release;
5673 }
5674 }
5675 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005676 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005677 rc = -EIO;
5678 goto err_out_release;
5679 }
5680
5681 bp->dev = dev;
5682 bp->pdev = pdev;
5683
5684 spin_lock_init(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005685 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5686
5687 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5688 mem_len = MB_GET_CID_ADDR(17);
5689 dev->mem_end = dev->mem_start + mem_len;
5690 dev->irq = pdev->irq;
5691
5692 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5693
5694 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005695 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005696 rc = -ENOMEM;
5697 goto err_out_release;
5698 }
5699
5700 /* Configure byte swap and enable write to the reg_window registers.
5701 * Rely on CPU to do target byte swapping on big endian systems
5702 * The chip's target access swapping will not swap all accesses
5703 */
5704 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5705 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5706 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5707
Pavel Machek829ca9a2005-09-03 15:56:56 -07005708 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005709
5710 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5711
Michael Chanb6016b72005-05-26 13:03:09 -07005712 /* Get bus information. */
5713 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5714 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5715 u32 clkreg;
5716
5717 bp->flags |= PCIX_FLAG;
5718
5719 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005720
Michael Chanb6016b72005-05-26 13:03:09 -07005721 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5722 switch (clkreg) {
5723 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5724 bp->bus_speed_mhz = 133;
5725 break;
5726
5727 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5728 bp->bus_speed_mhz = 100;
5729 break;
5730
5731 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5732 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5733 bp->bus_speed_mhz = 66;
5734 break;
5735
5736 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5737 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5738 bp->bus_speed_mhz = 50;
5739 break;
5740
5741 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5742 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5743 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5744 bp->bus_speed_mhz = 33;
5745 break;
5746 }
5747 }
5748 else {
5749 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5750 bp->bus_speed_mhz = 66;
5751 else
5752 bp->bus_speed_mhz = 33;
5753 }
5754
5755 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5756 bp->flags |= PCI_32BIT_FLAG;
5757
5758 /* 5706A0 may falsely detect SERR and PERR. */
5759 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5760 reg = REG_RD(bp, PCI_COMMAND);
5761 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5762 REG_WR(bp, PCI_COMMAND, reg);
5763 }
5764 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5765 !(bp->flags & PCIX_FLAG)) {
5766
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005767 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005768 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005769 goto err_out_unmap;
5770 }
5771
5772 bnx2_init_nvram(bp);
5773
Michael Chane3648b32005-11-04 08:51:21 -08005774 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5775
5776 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5777 BNX2_SHM_HDR_SIGNATURE_SIG)
5778 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5779 else
5780 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5781
Michael Chanb6016b72005-05-26 13:03:09 -07005782 /* Get the permanent MAC address. First we need to make sure the
5783 * firmware is actually running.
5784 */
Michael Chane3648b32005-11-04 08:51:21 -08005785 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005786
5787 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5788 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005789 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005790 rc = -ENODEV;
5791 goto err_out_unmap;
5792 }
5793
Michael Chane3648b32005-11-04 08:51:21 -08005794 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005795
Michael Chane3648b32005-11-04 08:51:21 -08005796 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005797 bp->mac_addr[0] = (u8) (reg >> 8);
5798 bp->mac_addr[1] = (u8) reg;
5799
Michael Chane3648b32005-11-04 08:51:21 -08005800 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005801 bp->mac_addr[2] = (u8) (reg >> 24);
5802 bp->mac_addr[3] = (u8) (reg >> 16);
5803 bp->mac_addr[4] = (u8) (reg >> 8);
5804 bp->mac_addr[5] = (u8) reg;
5805
5806 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005807 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005808
5809 bp->rx_csum = 1;
5810
5811 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5812
5813 bp->tx_quick_cons_trip_int = 20;
5814 bp->tx_quick_cons_trip = 20;
5815 bp->tx_ticks_int = 80;
5816 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005817
Michael Chanb6016b72005-05-26 13:03:09 -07005818 bp->rx_quick_cons_trip_int = 6;
5819 bp->rx_quick_cons_trip = 6;
5820 bp->rx_ticks_int = 18;
5821 bp->rx_ticks = 18;
5822
5823 bp->stats_ticks = 1000000 & 0xffff00;
5824
5825 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005826 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005827
Michael Chan5b0c76a2005-11-04 08:45:49 -08005828 bp->phy_addr = 1;
5829
Michael Chanb6016b72005-05-26 13:03:09 -07005830 /* Disable WOL support if we are running on a SERDES chip. */
5831 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5832 bp->phy_flags |= PHY_SERDES_FLAG;
5833 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005834 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5835 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005836 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005837 BNX2_SHARED_HW_CFG_CONFIG);
5838 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5839 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5840 }
Michael Chanb6016b72005-05-26 13:03:09 -07005841 }
5842
Michael Chan16088272006-06-12 22:16:43 -07005843 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5844 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5845 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005846 bp->flags |= NO_WOL_FLAG;
5847
Michael Chanb6016b72005-05-26 13:03:09 -07005848 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5849 bp->tx_quick_cons_trip_int =
5850 bp->tx_quick_cons_trip;
5851 bp->tx_ticks_int = bp->tx_ticks;
5852 bp->rx_quick_cons_trip_int =
5853 bp->rx_quick_cons_trip;
5854 bp->rx_ticks_int = bp->rx_ticks;
5855 bp->comp_prod_trip_int = bp->comp_prod_trip;
5856 bp->com_ticks_int = bp->com_ticks;
5857 bp->cmd_ticks_int = bp->cmd_ticks;
5858 }
5859
Michael Chanf9317a42006-09-29 17:06:23 -07005860 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5861 *
5862 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5863 * with byte enables disabled on the unused 32-bit word. This is legal
5864 * but causes problems on the AMD 8132 which will eventually stop
5865 * responding after a while.
5866 *
5867 * AMD believes this incompatibility is unique to the 5706, and
5868 * prefers to locally disable MSI rather than globally disabling it
5869 * using pci_msi_quirk.
5870 */
5871 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5872 struct pci_dev *amd_8132 = NULL;
5873
5874 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5875 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5876 amd_8132))) {
5877 u8 rev;
5878
5879 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5880 if (rev >= 0x10 && rev <= 0x13) {
5881 disable_msi = 1;
5882 pci_dev_put(amd_8132);
5883 break;
5884 }
5885 }
5886 }
5887
Michael Chanb6016b72005-05-26 13:03:09 -07005888 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5889 bp->req_line_speed = 0;
5890 if (bp->phy_flags & PHY_SERDES_FLAG) {
5891 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005892
Michael Chane3648b32005-11-04 08:51:21 -08005893 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005894 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5895 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5896 bp->autoneg = 0;
5897 bp->req_line_speed = bp->line_speed = SPEED_1000;
5898 bp->req_duplex = DUPLEX_FULL;
5899 }
Michael Chanb6016b72005-05-26 13:03:09 -07005900 }
5901 else {
5902 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5903 }
5904
5905 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5906
Michael Chancd339a02005-08-25 15:35:24 -07005907 init_timer(&bp->timer);
5908 bp->timer.expires = RUN_AT(bp->timer_interval);
5909 bp->timer.data = (unsigned long) bp;
5910 bp->timer.function = bnx2_timer;
5911
Michael Chanb6016b72005-05-26 13:03:09 -07005912 return 0;
5913
5914err_out_unmap:
5915 if (bp->regview) {
5916 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005917 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005918 }
5919
5920err_out_release:
5921 pci_release_regions(pdev);
5922
5923err_out_disable:
5924 pci_disable_device(pdev);
5925 pci_set_drvdata(pdev, NULL);
5926
5927err_out:
5928 return rc;
5929}
5930
5931static int __devinit
5932bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5933{
5934 static int version_printed = 0;
5935 struct net_device *dev = NULL;
5936 struct bnx2 *bp;
5937 int rc, i;
5938
5939 if (version_printed++ == 0)
5940 printk(KERN_INFO "%s", version);
5941
5942 /* dev zeroed in init_etherdev */
5943 dev = alloc_etherdev(sizeof(*bp));
5944
5945 if (!dev)
5946 return -ENOMEM;
5947
5948 rc = bnx2_init_board(pdev, dev);
5949 if (rc < 0) {
5950 free_netdev(dev);
5951 return rc;
5952 }
5953
5954 dev->open = bnx2_open;
5955 dev->hard_start_xmit = bnx2_start_xmit;
5956 dev->stop = bnx2_close;
5957 dev->get_stats = bnx2_get_stats;
5958 dev->set_multicast_list = bnx2_set_rx_mode;
5959 dev->do_ioctl = bnx2_ioctl;
5960 dev->set_mac_address = bnx2_change_mac_addr;
5961 dev->change_mtu = bnx2_change_mtu;
5962 dev->tx_timeout = bnx2_tx_timeout;
5963 dev->watchdog_timeo = TX_TIMEOUT;
5964#ifdef BCM_VLAN
5965 dev->vlan_rx_register = bnx2_vlan_rx_register;
5966 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5967#endif
5968 dev->poll = bnx2_poll;
5969 dev->ethtool_ops = &bnx2_ethtool_ops;
5970 dev->weight = 64;
5971
Michael Chan972ec0d2006-01-23 16:12:43 -08005972 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005973
5974#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5975 dev->poll_controller = poll_bnx2;
5976#endif
5977
5978 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005979 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005980 if (bp->regview)
5981 iounmap(bp->regview);
5982 pci_release_regions(pdev);
5983 pci_disable_device(pdev);
5984 pci_set_drvdata(pdev, NULL);
5985 free_netdev(dev);
5986 return rc;
5987 }
5988
5989 pci_set_drvdata(pdev, dev);
5990
5991 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005992 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005993 bp->name = board_info[ent->driver_data].name,
5994 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5995 "IRQ %d, ",
5996 dev->name,
5997 bp->name,
5998 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5999 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6000 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6001 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6002 bp->bus_speed_mhz,
6003 dev->base_addr,
6004 bp->pdev->irq);
6005
6006 printk("node addr ");
6007 for (i = 0; i < 6; i++)
6008 printk("%2.2x", dev->dev_addr[i]);
6009 printk("\n");
6010
6011 dev->features |= NETIF_F_SG;
6012 if (bp->flags & USING_DAC_FLAG)
6013 dev->features |= NETIF_F_HIGHDMA;
6014 dev->features |= NETIF_F_IP_CSUM;
6015#ifdef BCM_VLAN
6016 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6017#endif
6018#ifdef BCM_TSO
Michael Chanb11d6212006-06-29 12:31:21 -07006019 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chanb6016b72005-05-26 13:03:09 -07006020#endif
6021
6022 netif_carrier_off(bp->dev);
6023
6024 return 0;
6025}
6026
6027static void __devexit
6028bnx2_remove_one(struct pci_dev *pdev)
6029{
6030 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006031 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006032
Michael Chanafdc08b2005-08-25 15:34:29 -07006033 flush_scheduled_work();
6034
Michael Chanb6016b72005-05-26 13:03:09 -07006035 unregister_netdev(dev);
6036
6037 if (bp->regview)
6038 iounmap(bp->regview);
6039
6040 free_netdev(dev);
6041 pci_release_regions(pdev);
6042 pci_disable_device(pdev);
6043 pci_set_drvdata(pdev, NULL);
6044}
6045
6046static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006047bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006048{
6049 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006050 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006051 u32 reset_code;
6052
6053 if (!netif_running(dev))
6054 return 0;
6055
Michael Chan1d60290f2006-03-20 17:50:08 -08006056 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006057 bnx2_netif_stop(bp);
6058 netif_device_detach(dev);
6059 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08006060 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006061 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006062 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006063 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6064 else
6065 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6066 bnx2_reset_chip(bp, reset_code);
6067 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006068 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006069 return 0;
6070}
6071
6072static int
6073bnx2_resume(struct pci_dev *pdev)
6074{
6075 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006076 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006077
6078 if (!netif_running(dev))
6079 return 0;
6080
Pavel Machek829ca9a2005-09-03 15:56:56 -07006081 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006082 netif_device_attach(dev);
6083 bnx2_init_nic(bp);
6084 bnx2_netif_start(bp);
6085 return 0;
6086}
6087
/* PCI driver glue: matches NetXtreme II devices via bnx2_pci_tbl and
 * binds the probe/remove and power-management entry points above.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6096
/* Module init: register with the PCI core; per-device setup happens
 * in bnx2_init_one() as devices are matched.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6101
/* Module exit: unregister the driver; the PCI core invokes
 * bnx2_remove_one() for each bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6106
/* Hook the driver into the module load/unload sequence. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6109
6110
6111