blob: 7d213707008a884ab1ac846bdc79ac2c75db1766 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12#include "bnx2.h"
13#include "bnx2_fw.h"
14
15#define DRV_MODULE_NAME "bnx2"
16#define PFX DRV_MODULE_NAME ": "
Michael Chan0d36f372006-03-20 17:55:25 -080017#define DRV_MODULE_VERSION "1.4.38"
18#define DRV_MODULE_RELDATE "February 10, 2006"
Michael Chanb6016b72005-05-26 13:03:09 -070019
20#define RUN_AT(x) (jiffies + (x))
21
22/* Time in jiffies before concluding the transmitter is hung. */
23#define TX_TIMEOUT (5*HZ)
24
25static char version[] __devinitdata =
26 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080029MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070030MODULE_LICENSE("GPL");
31MODULE_VERSION(DRV_MODULE_VERSION);
32
33static int disable_msi = 0;
34
35module_param(disable_msi, int, 0);
36MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Board types; used as the driver_data index into board_info[] below
 * and in bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
47
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable board name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
60
/* PCI IDs claimed by this driver.  HP OEM boards (NC370x) are listed
 * before the catch-all PCI_ANY_ID entries so they match first and get
 * their branded names from board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }	/* terminator */
};
78
/* Supported NVRAM/flash devices.  Each entry appears to hold: a strap
 * value, several config/command register values, a buffered-flash flag,
 * page geometry, a byte-address mask, total size, and a display name —
 * verify the exact field order against struct flash_spec in bnx2.h.
 * The device in use is selected at init time by matching the strap.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
165
/* Export the PCI ID table so module tooling can autoload this driver. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
Michael Chane89bbf12005-08-25 15:36:58 -0700168static inline u32 bnx2_tx_avail(struct bnx2 *bp)
169{
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
171
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
175}
176
/* Indirect register read: latch the target offset into the PCI config
 * window address register, then read the value back through the window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
183
/* Indirect register write: latch the target offset into the PCI config
 * window address register, then write the value through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
/* Write one 32-bit word into on-chip context memory at
 * @cid_addr + @offset via the CTX data address/data register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
198
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is enabled, it is paused around
 * the manual access and restored afterwards.  On success returns 0
 * with the register value in *val; on MDIO timeout returns -EBUSY and
 * sets *val to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause hardware auto-polling so it does not collide with
		 * our manual MDIO command.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delaying */

		udelay(40);
	}

	/* Compose the MDIO read command and start the transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Transaction never completed. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
255
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is enabled, it is paused around
 * the manual access and restored afterwards.  Returns 0 on success or
 * -EBUSY if the MDIO transaction does not complete within the poll
 * loop.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause hardware auto-polling so it does not collide with
		 * our manual MDIO command.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delaying */

		udelay(40);
	}

	/* Compose the MDIO write command and start the transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
304
/* Mask device interrupts; the trailing read flushes the posted write
 * so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
/* Unmask device interrupts and acknowledge up to the last seen status
 * block index.  The index is written twice — first with MASK_INT still
 * set, then without — before kicking host coalescing (COAL_NOW) so any
 * already-pending event is re-delivered; this ordering is deliberate.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	/* Force an immediate coalescing pass. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
}
328
/* Disable interrupts and wait for any in-flight interrupt handler to
 * finish.  intr_sem is incremented first so bnx2_netif_start() will
 * not re-enable interrupts until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
336
/* Quiesce the interface: mask and synchronize interrupts, then stop
 * polling and the tx queue.  trans_start is refreshed so the stopped
 * queue does not trip the tx watchdog.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
347
/* Counterpart of bnx2_netif_stop(): decrement the interrupt-disable
 * count and, only when it reaches zero, restart the tx queue, polling,
 * and device interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
359
/* Release all rings and DMA blocks allocated by bnx2_alloc_mem().
 * Safe on a partially allocated state: every pointer is checked and
 * NULLed after freeing, so this doubles as the error-path cleanup.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->stats_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
				    bp->stats_blk, bp->stats_blk_mapping);
		bp->stats_blk = NULL;
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct status_block),
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* allocated with vmalloc() */
	bp->rx_buf_ring = NULL;
}
394
/* Allocate the tx/rx descriptor rings, their software shadow rings,
 * and the status/statistics DMA blocks.  On any failure, everything
 * allocated so far is released via bnx2_free_mem() and -ENOMEM is
 * returned.  Returns 0 on success.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i;

	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx shadow ring covers rx_max_ring pages of descriptors and
	 * can be large, hence vmalloc instead of kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	/* One DMA-coherent descriptor page per rx ring. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
453
454static void
Michael Chane3648b32005-11-04 08:51:21 -0800455bnx2_report_fw_link(struct bnx2 *bp)
456{
457 u32 fw_link_status = 0;
458
459 if (bp->link_up) {
460 u32 bmsr;
461
462 switch (bp->line_speed) {
463 case SPEED_10:
464 if (bp->duplex == DUPLEX_HALF)
465 fw_link_status = BNX2_LINK_STATUS_10HALF;
466 else
467 fw_link_status = BNX2_LINK_STATUS_10FULL;
468 break;
469 case SPEED_100:
470 if (bp->duplex == DUPLEX_HALF)
471 fw_link_status = BNX2_LINK_STATUS_100HALF;
472 else
473 fw_link_status = BNX2_LINK_STATUS_100FULL;
474 break;
475 case SPEED_1000:
476 if (bp->duplex == DUPLEX_HALF)
477 fw_link_status = BNX2_LINK_STATUS_1000HALF;
478 else
479 fw_link_status = BNX2_LINK_STATUS_1000FULL;
480 break;
481 case SPEED_2500:
482 if (bp->duplex == DUPLEX_HALF)
483 fw_link_status = BNX2_LINK_STATUS_2500HALF;
484 else
485 fw_link_status = BNX2_LINK_STATUS_2500FULL;
486 break;
487 }
488
489 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
490
491 if (bp->autoneg) {
492 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
493
494 bnx2_read_phy(bp, MII_BMSR, &bmsr);
495 bnx2_read_phy(bp, MII_BMSR, &bmsr);
496
497 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
498 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
499 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
500 else
501 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
502 }
503 }
504 else
505 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
506
507 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
508}
509
/* Log the link state to the console, update the net_device carrier
 * state, and mirror the state to firmware via bnx2_report_fw_link().
 * The message is assembled from several printk fragments.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
544
/* Resolve the effective flow control settings into bp->flow_ctrl.
 * Uses the forced configuration unless both speed and flow control are
 * autonegotiated, in which case the local and remote pause
 * advertisements are compared.  Flow control only applies in full
 * duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Not fully autonegotiated: apply the requested (forced) mode. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state itself. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Remap the 1000Base-X pause bits onto the copper bit
		 * positions so the resolution logic below can be shared.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
620
621static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800622bnx2_5708s_linkup(struct bnx2 *bp)
623{
624 u32 val;
625
626 bp->link_up = 1;
627 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
628 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
629 case BCM5708S_1000X_STAT1_SPEED_10:
630 bp->line_speed = SPEED_10;
631 break;
632 case BCM5708S_1000X_STAT1_SPEED_100:
633 bp->line_speed = SPEED_100;
634 break;
635 case BCM5708S_1000X_STAT1_SPEED_1G:
636 bp->line_speed = SPEED_1000;
637 break;
638 case BCM5708S_1000X_STAT1_SPEED_2G5:
639 bp->line_speed = SPEED_2500;
640 break;
641 }
642 if (val & BCM5708S_1000X_STAT1_FD)
643 bp->duplex = DUPLEX_FULL;
644 else
645 bp->duplex = DUPLEX_HALF;
646
647 return 0;
648}
649
650static int
651bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700652{
653 u32 bmcr, local_adv, remote_adv, common;
654
655 bp->link_up = 1;
656 bp->line_speed = SPEED_1000;
657
658 bnx2_read_phy(bp, MII_BMCR, &bmcr);
659 if (bmcr & BMCR_FULLDPLX) {
660 bp->duplex = DUPLEX_FULL;
661 }
662 else {
663 bp->duplex = DUPLEX_HALF;
664 }
665
666 if (!(bmcr & BMCR_ANENABLE)) {
667 return 0;
668 }
669
670 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
671 bnx2_read_phy(bp, MII_LPA, &remote_adv);
672
673 common = local_adv & remote_adv;
674 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
675
676 if (common & ADVERTISE_1000XFULL) {
677 bp->duplex = DUPLEX_FULL;
678 }
679 else {
680 bp->duplex = DUPLEX_HALF;
681 }
682 }
683
684 return 0;
685}
686
/* Determine speed and duplex for a copper PHY that just linked up.
 * With autoneg, resolve from the highest common local/partner
 * advertisement (1000 first, then 100/10); without autoneg, read the
 * forced settings from BMCR.  Always returns 0, but clears link_up if
 * autoneg yielded no common ability.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* 1000BASE-T status bits sit 2 positions above the
		 * corresponding control (advertisement) bits.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 100/10. */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: speed/duplex come straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
752
/* Program the EMAC to match the resolved link parameters: tx length
 * settings, port mode (MII/MII_10/GMII/25G), duplex, and rx/tx pause
 * enables.  Finishes by acking the EMAC link-change status.  Always
 * returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x2620/0x26ff are chip-specific tx length values; 1000HD gets
	 * the alternate setting (meaning not documented here — confirm
	 * against the chip manual).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only the 5708 has a dedicated MII_10 mode. */
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
819
/* Re-evaluate the link after a link event or poll: read BMSR (twice,
 * since the link bit is latched), resolve speed/duplex and flow
 * control on link up, report any state change, and reprogram the MAC.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		/* MAC loopback: link is up by definition. */
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched; read twice for current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 SerDes, override BMSR with the EMAC link status. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down; on autoneg SerDes make sure autoneg is
		 * re-enabled so the link can come back up.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
885
/* Software-reset the PHY via BMCR and wait (up to 100 x 10us) for the
 * reset bit to self-clear.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Settle time after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
909
910static u32
911bnx2_phy_get_pause_adv(struct bnx2 *bp)
912{
913 u32 adv = 0;
914
915 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
916 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
917
918 if (bp->phy_flags & PHY_SERDES_FLAG) {
919 adv = ADVERTISE_1000XPAUSE;
920 }
921 else {
922 adv = ADVERTISE_PAUSE_CAP;
923 }
924 }
925 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
926 if (bp->phy_flags & PHY_SERDES_FLAG) {
927 adv = ADVERTISE_1000XPSE_ASYM;
928 }
929 else {
930 adv = ADVERTISE_PAUSE_ASYM;
931 }
932 }
933 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
934 if (bp->phy_flags & PHY_SERDES_FLAG) {
935 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
936 }
937 else {
938 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
939 }
940 }
941 return adv;
942}
943
/* Configure a SerDes PHY according to bp->autoneg / bp->advertising /
 * bp->req_duplex.  In forced mode, 2.5G is disabled on the 5708 and
 * BMCR is programmed directly; in autoneg mode, the advertisement is
 * rewritten and autoneg restarted when anything changed.  A visible
 * link-down is forced first so the partner notices the change.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Drop the 2.5G capability bit in forced mode. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiated mode from here on. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		/* Re-enable the 2.5G capability bit. */
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Hold the PHY in loopback for ~11 ms. */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1042
/* Advertisement masks used when (re)configuring PHY speed, e.g. from
 * the ethtool paths: ethtool ADVERTISED_* masks per media type, and
 * MII ADVERTISE_* register masks per speed group.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
/* Program the copper PHY according to the requested settings in *bp.
 *
 * Autoneg path: rebuild the 10/100 and 1000 advertisement registers from
 * bp->advertising and restart autonegotiation only if something changed.
 * Forced path: write BMCR directly, forcing the link down first so the
 * peer notices the change.
 *
 * NOTE(review): appears to require bp->phy_lock held for MDIO access —
 * confirm against callers.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked down to the speed and
		 * pause bits so it can be compared with the new value.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* (Re)start autoneg only if the advertisement changed or
		 * autoneg is not currently enabled in BMCR.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1150
1151static int
1152bnx2_setup_phy(struct bnx2 *bp)
1153{
1154 if (bp->loopback == MAC_LOOPBACK)
1155 return 0;
1156
1157 if (bp->phy_flags & PHY_SERDES_FLAG) {
1158 return (bnx2_setup_serdes_phy(bp));
1159 }
1160 else {
1161 return (bnx2_setup_copper_phy(bp));
1162 }
1163}
1164
/* One-time initialization of the BCM5708 SerDes PHY: select the IEEE
 * register mapping, enable fiber mode with autodetect and PLL early-lock
 * detect, optionally advertise 2.5G, and apply board-specific TX signal
 * tuning from the shared-memory (NVRAM) configuration.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Switch the DIG3 block to the IEEE register set. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX driver. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 value in port config is a board-provided TX
	 * control override; apply it only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1218
/* One-time initialization of the BCM5706 SerDes PHY.  Configures the
 * extended-packet-length behavior depending on whether a jumbo MTU is in
 * use.  The 0x18/0x1c registers are undocumented shadow/expansion
 * registers — values come from vendor-provided sequences.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1254
/* One-time initialization of the copper PHY: apply the CRC workaround
 * register sequence, set or clear the extended-packet-length bit based
 * on MTU, and enable ethernet@wirespeed (link at reduced speed when the
 * cable cannot sustain 1000Mbps).  The 0x10/0x15/0x17/0x18/0x1c
 * registers are undocumented shadow/expansion registers.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): the flag is set unconditionally, so the check just
	 * below is always true; kept as-is to preserve behavior.
	 */
	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-provided CRC workaround write sequence. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1297
1298
/* Top-level PHY initialization: select link-ready interrupt mode, reset
 * the PHY, read its 32-bit PHY ID from MII_PHYSID1/2, run the
 * chip-specific init routine, then apply the current link settings.
 * Returns the status of the chip-specific init (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Enable link attention from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID: high 16 bits from PHYSID1, low 16 from PHYSID2. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	/* NOTE(review): setup return value is not propagated; only the
	 * init routine's rc is returned.
	 */
	bnx2_setup_phy(bp);

	return rc;
}
1331
1332static int
1333bnx2_set_mac_loopback(struct bnx2 *bp)
1334{
1335 u32 mac_mode;
1336
1337 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1338 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1339 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1340 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1341 bp->link_up = 1;
1342 return 0;
1343}
1344
Michael Chanbc5a0692006-01-23 16:13:22 -08001345static int bnx2_test_link(struct bnx2 *);
1346
/* Put the PHY into loopback at 1000/full for self-test, wait briefly
 * for the internal link to come up, then force the EMAC into GMII port
 * mode with MAC loopback/force bits cleared.
 * Returns 0 on success or the PHY write error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* MDIO access requires the phy lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to 10 times for the loopback link to report up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		udelay(10);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1376
/* Post a message to the bootcode via the shared-memory driver mailbox
 * and wait for the firmware to acknowledge it.
 *
 * @msg_data: message code/data (a sequence number is OR'ed in here).
 * @silent:   suppress the timeout error printk when non-zero.
 *
 * Returns 0 on success (or immediately for WAIT0-class messages, which
 * are not checked for completion), -EBUSY if the firmware never acked,
 * -EIO if it acked with a failure status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1419
/* Zero out all 96 on-chip connection contexts.  On 5706 A0 silicon some
 * context IDs map to remapped physical context addresses (errata), so
 * the physical CID is recomputed for VCIDs with bit 3 set.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 errata: remap VCIDs with bit 3 set. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map page 0 of the context to this PCID, clear it, then
		 * map the real virtual address.
		 */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1460
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the chip, remember the good ones (bit 9 clear), and free only
 * those back — permanently retiring the bad blocks.
 * Returns 0 on success, -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf handle into the free-command format. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1511
1512static void
1513bnx2_set_mac_addr(struct bnx2 *bp)
1514{
1515 u32 val;
1516 u8 *mac_addr = bp->dev->dev_addr;
1517
1518 val = (mac_addr[0] << 8) | mac_addr[1];
1519
1520 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1521
1522 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1523 (mac_addr[4] << 8) | mac_addr[5];
1524
1525 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1526}
1527
/* Allocate and DMA-map a fresh receive skb for RX ring slot @index,
 * filling in the buffer descriptor address and advancing the producer
 * byte-sequence counter.
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to an 8-byte boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the BD's hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1560
1561static void
1562bnx2_phy_int(struct bnx2 *bp)
1563{
1564 u32 new_link_state, old_link_state;
1565
1566 new_link_state = bp->status_blk->status_attn_bits &
1567 STATUS_ATTN_BITS_LINK_STATE;
1568 old_link_state = bp->status_blk->status_attn_bits_ack &
1569 STATUS_ATTN_BITS_LINK_STATE;
1570 if (new_link_state != old_link_state) {
1571 if (new_link_state) {
1572 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1573 STATUS_ATTN_BITS_LINK_STATE);
1574 }
1575 else {
1576 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1577 STATUS_ATTN_BITS_LINK_STATE);
1578 }
1579 bnx2_set_link(bp);
1580 }
1581}
1582
/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap each packet's head and fragments, free the skbs, and wake the
 * queue if it was stopped and enough descriptors are now free.
 * Runs from the NAPI poll loop.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last ring entry is a link BD; skip over it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD is not yet done. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-read the consumer index; more may have completed. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Re-check under tx_lock to avoid racing with the xmit path. */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1665
/* Recycle an RX skb from ring slot @cons to producer slot @prod without
 * re-allocating: sync the buffer back to the device, carry the DMA
 * mapping and BD address over to the producer slot, and advance the
 * producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD contents are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1695
/* NAPI RX handler: process up to @budget received packets between the
 * software and hardware consumer indices.  Error frames and allocation
 * failures recycle the buffer; small packets (with jumbo MTU) are
 * copied into a fresh skb so the large buffer can be recycled.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last ring entry is a link BD; skip over it. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header area is synced here; the full
		 * buffer is unmapped below if the skb is passed up.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Frame length excluding the 4-byte CRC. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer; recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum only if it reported
			 * no TCP/UDP checksum errors.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1846
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further chip interrupts until the NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1869
/* INTx ISR.  Because the line may be shared, first check whether this
 * device actually raised the interrupt before acking and scheduling the
 * NAPI poll.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further chip interrupts until the NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1899
Michael Chanf4e418f2005-11-04 08:53:48 -08001900static inline int
1901bnx2_has_work(struct bnx2 *bp)
1902{
1903 struct status_block *sblk = bp->status_blk;
1904
1905 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1906 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1907 return 1;
1908
1909 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1910 bp->link_up)
1911 return 1;
1912
1913 return 0;
1914}
1915
/* NAPI poll routine: service link attentions, TX completions, and up to
 * *budget RX packets.  Returns 1 while more work remains; returns 0 and
 * re-enables interrupts when the device is idle.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention pending if the attn bit differs from its ack. */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack write re-enables interrupts. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: write the index once with interrupts still masked,
		 * then again unmasked.  NOTE(review): the double write looks
		 * like a deliberate chip workaround — confirm before
		 * simplifying.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
1970
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 */
/* Program the receive filters (promiscuous, all-multi, or the multicast
 * hash) and VLAN tag stripping according to dev->flags and the
 * configured multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware unless ASF management needs them. */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2044
2045static void
2046load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2047 u32 rv2p_proc)
2048{
2049 int i;
2050 u32 val;
2051
2052
2053 for (i = 0; i < rv2p_code_len; i += 8) {
2054 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2055 rv2p_code++;
2056 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2057 rv2p_code++;
2058
2059 if (rv2p_proc == RV2P_PROC1) {
2060 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2061 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2062 }
2063 else {
2064 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2065 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2066 }
2067 }
2068
2069 /* Reset the processor, un-stall is done later. */
2070 if (rv2p_proc == RV2P_PROC1) {
2071 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2072 }
2073 else {
2074 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2075 }
2076}
2077
2078static void
2079load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2080{
2081 u32 offset;
2082 u32 val;
2083
2084 /* Halt the CPU. */
2085 val = REG_RD_IND(bp, cpu_reg->mode);
2086 val |= cpu_reg->mode_value_halt;
2087 REG_WR_IND(bp, cpu_reg->mode, val);
2088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2089
2090 /* Load the Text area. */
2091 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2092 if (fw->text) {
2093 int j;
2094
2095 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2096 REG_WR_IND(bp, offset, fw->text[j]);
2097 }
2098 }
2099
2100 /* Load the Data area. */
2101 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2102 if (fw->data) {
2103 int j;
2104
2105 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2106 REG_WR_IND(bp, offset, fw->data[j]);
2107 }
2108 }
2109
2110 /* Load the SBSS area. */
2111 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2112 if (fw->sbss) {
2113 int j;
2114
2115 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2116 REG_WR_IND(bp, offset, fw->sbss[j]);
2117 }
2118 }
2119
2120 /* Load the BSS area. */
2121 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2122 if (fw->bss) {
2123 int j;
2124
2125 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2126 REG_WR_IND(bp, offset, fw->bss[j]);
2127 }
2128 }
2129
2130 /* Load the Read-Only area. */
2131 offset = cpu_reg->spad_base +
2132 (fw->rodata_addr - cpu_reg->mips_view_base);
2133 if (fw->rodata) {
2134 int j;
2135
2136 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2137 REG_WR_IND(bp, offset, fw->rodata[j]);
2138 }
2139 }
2140
2141 /* Clear the pre-fetch instruction. */
2142 REG_WR_IND(bp, cpu_reg->inst, 0);
2143 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2144
2145 /* Start the CPU. */
2146 val = REG_RD_IND(bp, cpu_reg->mode);
2147 val &= ~cpu_reg->mode_value_halt;
2148 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2149 REG_WR_IND(bp, cpu_reg->mode, val);
2150}
2151
/* Load firmware into all of the chip's internal processors: both RV2P
 * engines plus the RX, TX, TX patch-up, and completion MIPS CPUs.
 * For each MIPS CPU a cpu_reg descriptor (register addresses) and an
 * fw_info descriptor (image sections from bnx2_fw.h) are filled in and
 * handed to load_cpu_fw().  All CPUs share the same scratchpad view
 * base (0x8000000) and state-clear mask (0xffffff).
 */
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

}
2347
/* Move the device between PCI power states.
 *
 * PCI_D0: bring the chip out of low power (with the required 20 ms
 * settle time when leaving D3hot) and disable magic/ACPI packet
 * detection.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, force a 10/100 copper link,
 * program the MAC for magic/ACPI packet reception with an
 * accept-all-multicast filter, notify the firmware, and then write
 * PMCSR to enter D3hot (PME enabled when WOL is on).  No register
 * access is allowed after the PMCSR write until D0 is restored.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any pending wake packets; stop magic-packet detect. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link at 10/100 for
			 * wake-up, then restore the saved settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WOL is
		 * entirely unsupported on this board). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits 3) when WOL is
		 * requested; later chips always do. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2474
2475static int
2476bnx2_acquire_nvram_lock(struct bnx2 *bp)
2477{
2478 u32 val;
2479 int j;
2480
2481 /* Request access to the flash interface. */
2482 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2483 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2484 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2485 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2486 break;
2487
2488 udelay(5);
2489 }
2490
2491 if (j >= NVRAM_TIMEOUT_COUNT)
2492 return -EBUSY;
2493
2494 return 0;
2495}
2496
2497static int
2498bnx2_release_nvram_lock(struct bnx2 *bp)
2499{
2500 int j;
2501 u32 val;
2502
2503 /* Relinquish nvram interface. */
2504 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2505
2506 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2507 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2508 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2509 break;
2510
2511 udelay(5);
2512 }
2513
2514 if (j >= NVRAM_TIMEOUT_COUNT)
2515 return -EBUSY;
2516
2517 return 0;
2518}
2519
2520
2521static int
2522bnx2_enable_nvram_write(struct bnx2 *bp)
2523{
2524 u32 val;
2525
2526 val = REG_RD(bp, BNX2_MISC_CFG);
2527 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2528
2529 if (!bp->flash_info->buffered) {
2530 int j;
2531
2532 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2533 REG_WR(bp, BNX2_NVM_COMMAND,
2534 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2535
2536 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2537 udelay(5);
2538
2539 val = REG_RD(bp, BNX2_NVM_COMMAND);
2540 if (val & BNX2_NVM_COMMAND_DONE)
2541 break;
2542 }
2543
2544 if (j >= NVRAM_TIMEOUT_COUNT)
2545 return -EBUSY;
2546 }
2547 return 0;
2548}
2549
2550static void
2551bnx2_disable_nvram_write(struct bnx2 *bp)
2552{
2553 u32 val;
2554
2555 val = REG_RD(bp, BNX2_MISC_CFG);
2556 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2557}
2558
2559
2560static void
2561bnx2_enable_nvram_access(struct bnx2 *bp)
2562{
2563 u32 val;
2564
2565 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2566 /* Enable both bits, even on read. */
2567 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2568 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2569}
2570
2571static void
2572bnx2_disable_nvram_access(struct bnx2 *bp)
2573{
2574 u32 val;
2575
2576 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2577 /* Disable both bits, even after read. */
2578 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2579 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2580 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2581}
2582
2583static int
2584bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2585{
2586 u32 cmd;
2587 int j;
2588
2589 if (bp->flash_info->buffered)
2590 /* Buffered flash, no erase needed */
2591 return 0;
2592
2593 /* Build an erase command */
2594 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2595 BNX2_NVM_COMMAND_DOIT;
2596
2597 /* Need to clear DONE bit separately. */
2598 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2599
2600 /* Address of the NVRAM to read from. */
2601 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2602
2603 /* Issue an erase command. */
2604 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2605
2606 /* Wait for completion. */
2607 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2608 u32 val;
2609
2610 udelay(5);
2611
2612 val = REG_RD(bp, BNX2_NVM_COMMAND);
2613 if (val & BNX2_NVM_COMMAND_DONE)
2614 break;
2615 }
2616
2617 if (j >= NVRAM_TIMEOUT_COUNT)
2618 return -EBUSY;
2619
2620 return 0;
2621}
2622
2623static int
2624bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2625{
2626 u32 cmd;
2627 int j;
2628
2629 /* Build the command word. */
2630 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2631
2632 /* Calculate an offset of a buffered flash. */
2633 if (bp->flash_info->buffered) {
2634 offset = ((offset / bp->flash_info->page_size) <<
2635 bp->flash_info->page_bits) +
2636 (offset % bp->flash_info->page_size);
2637 }
2638
2639 /* Need to clear DONE bit separately. */
2640 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2641
2642 /* Address of the NVRAM to read from. */
2643 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2644
2645 /* Issue a read command. */
2646 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2647
2648 /* Wait for completion. */
2649 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2650 u32 val;
2651
2652 udelay(5);
2653
2654 val = REG_RD(bp, BNX2_NVM_COMMAND);
2655 if (val & BNX2_NVM_COMMAND_DONE) {
2656 val = REG_RD(bp, BNX2_NVM_READ);
2657
2658 val = be32_to_cpu(val);
2659 memcpy(ret_val, &val, 4);
2660 break;
2661 }
2662 }
2663 if (j >= NVRAM_TIMEOUT_COUNT)
2664 return -EBUSY;
2665
2666 return 0;
2667}
2668
2669
2670static int
2671bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2672{
2673 u32 cmd, val32;
2674 int j;
2675
2676 /* Build the command word. */
2677 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2678
2679 /* Calculate an offset of a buffered flash. */
2680 if (bp->flash_info->buffered) {
2681 offset = ((offset / bp->flash_info->page_size) <<
2682 bp->flash_info->page_bits) +
2683 (offset % bp->flash_info->page_size);
2684 }
2685
2686 /* Need to clear DONE bit separately. */
2687 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2688
2689 memcpy(&val32, val, 4);
2690 val32 = cpu_to_be32(val32);
2691
2692 /* Write the data. */
2693 REG_WR(bp, BNX2_NVM_WRITE, val32);
2694
2695 /* Address of the NVRAM to write to. */
2696 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2697
2698 /* Issue the write command. */
2699 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2700
2701 /* Wait for completion. */
2702 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2703 udelay(5);
2704
2705 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2706 break;
2707 }
2708 if (j >= NVRAM_TIMEOUT_COUNT)
2709 return -EBUSY;
2710
2711 return 0;
2712}
2713
/* Identify the attached NVRAM/flash part and record it in
 * bp->flash_info.  If the flash interface straps indicate it has not
 * been reconfigured yet, match on the strap bits and program the
 * interface from the matching flash_table entry.  The usable flash
 * size comes from shared memory when set, otherwise from the table.
 * Returns 0 on success, -ENODEV for an unrecognized part, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* Bit 30 set means the interface was already reconfigured,
	 * so match on config1 instead of the raw straps. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap decoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Shared memory may override the table's total size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2791
/* Read @buf_size bytes from NVRAM at byte @offset into @ret_buf.
 * Neither offset nor size needs to be dword aligned: an unaligned
 * head is handled with a masked first-word read, and a ragged tail
 * is handled by reading a full final word and copying only the
 * needed bytes (tracked in 'extra').  The FIRST/LAST command flags
 * frame the whole transaction for the flash controller.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the early 'return rc' paths after a failed dword
 * read skip bnx2_disable_nvram_access()/bnx2_release_nvram_lock(),
 * leaving the NVRAM arbiter held — looks like a lock leak on the
 * (rare) timeout path; confirm against later upstream versions.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
	int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		/* Unaligned start: read the containing dword and copy
		 * only the trailing pre_len bytes. */
		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Round the remaining length up to a whole dword;
		 * 'extra' bytes of the final word will be discarded. */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle words go straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final word: read into a scratch buffer so the
		 * 'extra' pad bytes are not written past ret_buf. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
2901
2902static int
2903bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2904 int buf_size)
2905{
2906 u32 written, offset32, len32;
2907 u8 *buf, start[4], end[4];
2908 int rc = 0;
2909 int align_start, align_end;
2910
2911 buf = data_buf;
2912 offset32 = offset;
2913 len32 = buf_size;
2914 align_start = align_end = 0;
2915
2916 if ((align_start = (offset32 & 3))) {
2917 offset32 &= ~3;
2918 len32 += align_start;
2919 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2920 return rc;
2921 }
2922
2923 if (len32 & 3) {
2924 if ((len32 > 4) || !align_start) {
2925 align_end = 4 - (len32 & 3);
2926 len32 += align_end;
2927 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2928 end, 4))) {
2929 return rc;
2930 }
2931 }
2932 }
2933
2934 if (align_start || align_end) {
2935 buf = kmalloc(len32, GFP_KERNEL);
2936 if (buf == 0)
2937 return -ENOMEM;
2938 if (align_start) {
2939 memcpy(buf, start, 4);
2940 }
2941 if (align_end) {
2942 memcpy(buf + len32 - 4, end, 4);
2943 }
2944 memcpy(buf + align_start, data_buf, buf_size);
2945 }
2946
2947 written = 0;
2948 while ((written < len32) && (rc == 0)) {
2949 u32 page_start, page_end, data_start, data_end;
2950 u32 addr, cmd_flags;
2951 int i;
2952 u8 flash_buffer[264];
2953
2954 /* Find the page_start addr */
2955 page_start = offset32 + written;
2956 page_start -= (page_start % bp->flash_info->page_size);
2957 /* Find the page_end addr */
2958 page_end = page_start + bp->flash_info->page_size;
2959 /* Find the data_start addr */
2960 data_start = (written == 0) ? offset32 : page_start;
2961 /* Find the data_end addr */
2962 data_end = (page_end > offset32 + len32) ?
2963 (offset32 + len32) : page_end;
2964
2965 /* Request access to the flash interface. */
2966 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2967 goto nvram_write_end;
2968
2969 /* Enable access to flash interface */
2970 bnx2_enable_nvram_access(bp);
2971
2972 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2973 if (bp->flash_info->buffered == 0) {
2974 int j;
2975
2976 /* Read the whole page into the buffer
2977 * (non-buffer flash only) */
2978 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2979 if (j == (bp->flash_info->page_size - 4)) {
2980 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2981 }
2982 rc = bnx2_nvram_read_dword(bp,
2983 page_start + j,
2984 &flash_buffer[j],
2985 cmd_flags);
2986
2987 if (rc)
2988 goto nvram_write_end;
2989
2990 cmd_flags = 0;
2991 }
2992 }
2993
2994 /* Enable writes to flash interface (unlock write-protect) */
2995 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2996 goto nvram_write_end;
2997
2998 /* Erase the page */
2999 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3000 goto nvram_write_end;
3001
3002 /* Re-enable the write again for the actual write */
3003 bnx2_enable_nvram_write(bp);
3004
3005 /* Loop to write back the buffer data from page_start to
3006 * data_start */
3007 i = 0;
3008 if (bp->flash_info->buffered == 0) {
3009 for (addr = page_start; addr < data_start;
3010 addr += 4, i += 4) {
3011
3012 rc = bnx2_nvram_write_dword(bp, addr,
3013 &flash_buffer[i], cmd_flags);
3014
3015 if (rc != 0)
3016 goto nvram_write_end;
3017
3018 cmd_flags = 0;
3019 }
3020 }
3021
3022 /* Loop to write the new data from data_start to data_end */
3023 for (addr = data_start; addr < data_end; addr += 4, i++) {
3024 if ((addr == page_end - 4) ||
3025 ((bp->flash_info->buffered) &&
3026 (addr == data_end - 4))) {
3027
3028 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3029 }
3030 rc = bnx2_nvram_write_dword(bp, addr, buf,
3031 cmd_flags);
3032
3033 if (rc != 0)
3034 goto nvram_write_end;
3035
3036 cmd_flags = 0;
3037 buf += 4;
3038 }
3039
3040 /* Loop to write back the buffer data from data_end
3041 * to page_end */
3042 if (bp->flash_info->buffered == 0) {
3043 for (addr = data_end; addr < page_end;
3044 addr += 4, i += 4) {
3045
3046 if (addr == page_end-4) {
3047 cmd_flags = BNX2_NVM_COMMAND_LAST;
3048 }
3049 rc = bnx2_nvram_write_dword(bp, addr,
3050 &flash_buffer[i], cmd_flags);
3051
3052 if (rc != 0)
3053 goto nvram_write_end;
3054
3055 cmd_flags = 0;
3056 }
3057 }
3058
3059 /* Disable writes to flash interface (lock write-protect) */
3060 bnx2_disable_nvram_write(bp);
3061
3062 /* Disable access to flash interface */
3063 bnx2_disable_nvram_access(bp);
3064 bnx2_release_nvram_lock(bp);
3065
3066 /* Increment written */
3067 written += data_end - data_start;
3068 }
3069
3070nvram_write_end:
3071 if (align_start || align_end)
3072 kfree(buf);
3073 return rc;
3074}
3075
/* Perform a full core reset of the chip.
 *
 * Sequence: quiesce DMA, handshake with the bootcode firmware
 * (WAIT0/@reset_code), leave a driver-reset signature in shared
 * memory so the firmware treats this as a soft reset, pulse the core
 * reset bits, poll for reset completion, verify endian configuration,
 * and wait for the firmware to finish re-initializing (WAIT1).
 * Includes the 5706 A0/A1 15 ms erratum delay and the A0 voltage
 * regulator / bad-rbuf workarounds.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 erratum: the chip may be inaccessible briefly
	 * after reset is requested. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3154
/* Bring the chip from post-reset state to operational: program DMA and
 * byte-swap modes, load the internal CPU firmware, set the MAC address,
 * size the kernel-bypass queues and host-coalescing parameters, and
 * complete the RESET handshake with the bootcode.
 *
 * Returns 0 on success or the negative errno from the final firmware
 * sync (bnx2_fw_sync).
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA swap configuration; the control-path byte swap is only
	 * needed on big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): bit 23 appears to be a PCI-X 133 MHz tuning bit;
	 * confirm against the register spec. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to one DMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the PCI-X command word. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context/CPU initialization. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size for the mailbox queue. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processor the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff algorithm from the MAC address so
	 * different ports back off differently. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds; each register packs the
	 * during-interrupt value in the high half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 errata: timer modes are not used. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the bootcode reports ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the bootcode initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable (almost) all blocks and flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	return rc;
}
3312
3313
/* Set up the TX ring: chain the last BD back to the start of the ring,
 * reset the software producer/consumer state, and program the TX
 * context in the chip with the ring type and base address.  Assumes
 * tx_desc_ring/tx_desc_mapping were already allocated. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The last slot of the page is a chain BD pointing back to the
	 * beginning, making the ring circular. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	/* NOTE(review): the 8 in bits 16+ looks like a BD pre-fetch
	 * count -- confirm against the L2 context spec. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Tell the chip where the TX BD chain starts. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3344
/* Set up the RX rings: compute buffer sizes from the current MTU, link
 * the ring pages into a circular chain, program the RX context, and
 * fill the ring with freshly allocated skbs. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Initialize every ring page; the BD after the last data BD of
	 * each page is a chain pointer to the next page, and the final
	 * page points back to page 0, closing the circle. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* DMA base address of the first ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post receive buffers; stop early if allocation fails (the ring
	 * simply runs with fewer buffers). */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3404
3405static void
Michael Chan13daffa2006-03-20 17:49:20 -08003406bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3407{
3408 u32 num_rings, max;
3409
3410 bp->rx_ring_size = size;
3411 num_rings = 1;
3412 while (size > MAX_RX_DESC_CNT) {
3413 size -= MAX_RX_DESC_CNT;
3414 num_rings++;
3415 }
3416 /* round to next power of 2 */
3417 max = MAX_RX_RINGS;
3418 while ((max & num_rings) == 0)
3419 max >>= 1;
3420
3421 if (num_rings != max)
3422 max <<= 1;
3423
3424 bp->rx_max_ring = max;
3425 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3426}
3427
/* Release every skb still owned by the TX ring, unmapping the DMA
 * addresses of both the linear head and all paged fragments.  Used
 * when shutting down or resetting with packets still queued. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Slot i holds the linear part of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* The fragments occupy the next nr_frags slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_any(skb);
		/* Advance past the head slot plus all fragment slots. */
		i += j + 1;
	}

}
3464
3465static void
3466bnx2_free_rx_skbs(struct bnx2 *bp)
3467{
3468 int i;
3469
3470 if (bp->rx_buf_ring == NULL)
3471 return;
3472
Michael Chan13daffa2006-03-20 17:49:20 -08003473 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003474 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3475 struct sk_buff *skb = rx_buf->skb;
3476
Michael Chan05d0f1c2005-11-04 08:53:48 -08003477 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003478 continue;
3479
3480 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3481 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3482
3483 rx_buf->skb = NULL;
3484
3485 dev_kfree_skb_any(skb);
3486 }
3487}
3488
/* Free all driver-owned packet buffers on both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3495
3496static int
3497bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3498{
3499 int rc;
3500
3501 rc = bnx2_reset_chip(bp, reset_code);
3502 bnx2_free_skbs(bp);
3503 if (rc)
3504 return rc;
3505
3506 bnx2_init_chip(bp);
3507 bnx2_init_tx_ring(bp);
3508 bnx2_init_rx_ring(bp);
3509 return 0;
3510}
3511
3512static int
3513bnx2_init_nic(struct bnx2 *bp)
3514{
3515 int rc;
3516
3517 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3518 return rc;
3519
3520 bnx2_init_phy(bp);
3521 bnx2_set_link(bp);
3522 return 0;
3523}
3524
/* Diagnostic register test.  For each entry in reg_tbl, verify that the
 * writable bits (rw_mask) can be both cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success,
 * -ENODEV on the first misbehaving register. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;
		u16 flags;
		u32 rw_mask;	/* bits that must be read/write */
		u32 ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: no rw bit may stick, ro bits must not move. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: every rw bit must read back set. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3687
3688static int
3689bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3690{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003691 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003692 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3693 int i;
3694
3695 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3696 u32 offset;
3697
3698 for (offset = 0; offset < size; offset += 4) {
3699
3700 REG_WR_IND(bp, start + offset, test_pattern[i]);
3701
3702 if (REG_RD_IND(bp, start + offset) !=
3703 test_pattern[i]) {
3704 return -ENODEV;
3705 }
3706 }
3707 }
3708 return 0;
3709}
3710
3711static int
3712bnx2_test_memory(struct bnx2 *bp)
3713{
3714 int ret = 0;
3715 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003716 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003717 u32 offset;
3718 u32 len;
3719 } mem_tbl[] = {
3720 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003721 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003722 { 0xe0000, 0x4000 },
3723 { 0x120000, 0x4000 },
3724 { 0x1a0000, 0x4000 },
3725 { 0x160000, 0x4000 },
3726 { 0xffffffff, 0 },
3727 };
3728
3729 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3730 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3731 mem_tbl[i].len)) != 0) {
3732 return ret;
3733 }
3734 }
3735
3736 return ret;
3737}
3738
Michael Chanbc5a0692006-01-23 16:13:22 -08003739#define BNX2_MAC_LOOPBACK 0
3740#define BNX2_PHY_LOOPBACK 1
3741
/* Send one self-addressed test frame through the requested loopback
 * path (BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK) and verify that it
 * arrives back on the RX ring intact.  Returns 0 on success, -EINVAL
 * for an unknown mode, -ENOMEM when no skb can be allocated, or
 * -ENODEV when the frame is lost or corrupted. */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	u32 val;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size frame addressed to our own MAC, with a
	 * counting byte pattern in the payload for later verification. */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (no interrupt) to capture a stable
	 * starting RX consumer index. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand-build a single TX BD and ring the doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	/* Give the frame time to loop around. */
	udelay(100);

	/* Force another status block update to pick up completions. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* TX must have been fully consumed. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip prepends an l2_fhdr to the received data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject any frame the chip flagged as errored. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, minus the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3859
Michael Chanbc5a0692006-01-23 16:13:22 -08003860#define BNX2_MAC_LOOPBACK_FAILED 1
3861#define BNX2_PHY_LOOPBACK_FAILED 2
3862#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3863 BNX2_PHY_LOOPBACK_FAILED)
3864
3865static int
3866bnx2_test_loopback(struct bnx2 *bp)
3867{
3868 int rc = 0;
3869
3870 if (!netif_running(bp->dev))
3871 return BNX2_LOOPBACK_FAILED;
3872
3873 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3874 spin_lock_bh(&bp->phy_lock);
3875 bnx2_init_phy(bp);
3876 spin_unlock_bh(&bp->phy_lock);
3877 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3878 rc |= BNX2_MAC_LOOPBACK_FAILED;
3879 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3880 rc |= BNX2_PHY_LOOPBACK_FAILED;
3881 return rc;
3882}
3883
Michael Chanb6016b72005-05-26 13:03:09 -07003884#define NVRAM_SIZE 0x200
3885#define CRC32_RESIDUAL 0xdebb20e3
3886
3887static int
3888bnx2_test_nvram(struct bnx2 *bp)
3889{
3890 u32 buf[NVRAM_SIZE / 4];
3891 u8 *data = (u8 *) buf;
3892 int rc = 0;
3893 u32 magic, csum;
3894
3895 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3896 goto test_nvram_done;
3897
3898 magic = be32_to_cpu(buf[0]);
3899 if (magic != 0x669955aa) {
3900 rc = -ENODEV;
3901 goto test_nvram_done;
3902 }
3903
3904 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3905 goto test_nvram_done;
3906
3907 csum = ether_crc_le(0x100, data);
3908 if (csum != CRC32_RESIDUAL) {
3909 rc = -ENODEV;
3910 goto test_nvram_done;
3911 }
3912
3913 csum = ether_crc_le(0x100, data + 0x100);
3914 if (csum != CRC32_RESIDUAL) {
3915 rc = -ENODEV;
3916 }
3917
3918test_nvram_done:
3919 return rc;
3920}
3921
3922static int
3923bnx2_test_link(struct bnx2 *bp)
3924{
3925 u32 bmsr;
3926
Michael Chanc770a652005-08-25 15:38:39 -07003927 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003928 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3929 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07003930 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003931
3932 if (bmsr & BMSR_LSTATUS) {
3933 return 0;
3934 }
3935 return -ENODEV;
3936}
3937
3938static int
3939bnx2_test_intr(struct bnx2 *bp)
3940{
3941 int i;
3942 u32 val;
3943 u16 status_idx;
3944
3945 if (!netif_running(bp->dev))
3946 return -ENODEV;
3947
3948 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3949
3950 /* This register is not touched during run-time. */
3951 val = REG_RD(bp, BNX2_HC_COMMAND);
3952 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3953 REG_RD(bp, BNX2_HC_COMMAND);
3954
3955 for (i = 0; i < 10; i++) {
3956 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3957 status_idx) {
3958
3959 break;
3960 }
3961
3962 msleep_interruptible(10);
3963 }
3964 if (i < 10)
3965 return 0;
3966
3967 return -ENODEV;
3968}
3969
/* Periodic housekeeping timer (re-armed every current_interval): sends
 * the driver heartbeat to the bootcode and, on 5706 SerDes ports, runs
 * the parallel-detect workaround that forces 1000/full when the link
 * partner is not autonegotiating. */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are quiesced (e.g. reset in progress): skip the work
	 * but keep the timer alive. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat so the firmware knows the driver is still alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* An autoneg attempt is pending; count it down. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* 0x1c/0x17/0x15 are vendor-specific PHY
				 * registers -- shadow/expansion reads. */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Signal present but partner not
					 * autonegotiating: force 1000/full
					 * (parallel detect). */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link came up via parallel detect; if the partner
			 * now sends CONFIG, switch back to autoneg. */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4048
4049/* Called with rtnl_lock */
/* net_device open handler (rtnl held): power the chip to D0, allocate
 * rings and status/stats memory, attach the interrupt (preferring MSI
 * with a run-time fallback to INTx when the MSI test fails), bring up
 * the NIC and start the transmit queue.  Returns 0 or a negative errno
 * with all resources released. */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is not used on 5706 A0/A1 steppings, and can be disabled
	 * via the module parameter. */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind: irq, MSI, buffers, memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize: the interrupt test disturbed the
			 * chip state. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4144
4145static void
4146bnx2_reset_task(void *data)
4147{
4148 struct bnx2 *bp = data;
4149
Michael Chanafdc08b2005-08-25 15:34:29 -07004150 if (!netif_running(bp->dev))
4151 return;
4152
4153 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004154 bnx2_netif_stop(bp);
4155
4156 bnx2_init_nic(bp);
4157
4158 atomic_set(&bp->intr_sem, 1);
4159 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004160 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004161}
4162
/* TX watchdog callback, invoked when the transmitter is considered
 * hung (see TX_TIMEOUT).  Defers the actual reset to bnx2_reset_task().
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4171
4172#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while the VLAN group pointer is swapped. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* Reprogram the RX filters so the new VLAN setting takes effect. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4186
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while the VLAN table is modified. */
	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	/* Reprogram the RX filters without the removed VID. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4201#endif
4202
4203/* Called with dev->xmit_lock.
4204 * hard_start_xmit is pseudo-lockless - a lock is only required when
4205 * the tx queue is full. This way, we get the benefit of lockless
4206 * operations most of the time without the complexities to handle
4207 * netif_stop_queue/wake_queue race conditions.
4208 */
4209static int
4210bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4211{
Michael Chan972ec0d2006-01-23 16:12:43 -08004212 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004213 dma_addr_t mapping;
4214 struct tx_bd *txbd;
4215 struct sw_bd *tx_buf;
4216 u32 len, vlan_tag_flags, last_frag, mss;
4217 u16 prod, ring_prod;
4218 int i;
4219
Michael Chane89bbf12005-08-25 15:36:58 -07004220 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004221 netif_stop_queue(dev);
4222 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4223 dev->name);
4224
4225 return NETDEV_TX_BUSY;
4226 }
4227 len = skb_headlen(skb);
4228 prod = bp->tx_prod;
4229 ring_prod = TX_RING_IDX(prod);
4230
4231 vlan_tag_flags = 0;
4232 if (skb->ip_summed == CHECKSUM_HW) {
4233 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4234 }
4235
4236 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4237 vlan_tag_flags |=
4238 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4239 }
4240#ifdef BCM_TSO
4241 if ((mss = skb_shinfo(skb)->tso_size) &&
4242 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4243 u32 tcp_opt_len, ip_tcp_len;
4244
4245 if (skb_header_cloned(skb) &&
4246 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4247 dev_kfree_skb(skb);
4248 return NETDEV_TX_OK;
4249 }
4250
4251 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4252 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4253
4254 tcp_opt_len = 0;
4255 if (skb->h.th->doff > 5) {
4256 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4257 }
4258 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4259
4260 skb->nh.iph->check = 0;
4261 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4262 skb->h.th->check =
4263 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4264 skb->nh.iph->daddr,
4265 0, IPPROTO_TCP, 0);
4266
4267 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4268 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4269 (tcp_opt_len >> 2)) << 8;
4270 }
4271 }
4272 else
4273#endif
4274 {
4275 mss = 0;
4276 }
4277
4278 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4279
4280 tx_buf = &bp->tx_buf_ring[ring_prod];
4281 tx_buf->skb = skb;
4282 pci_unmap_addr_set(tx_buf, mapping, mapping);
4283
4284 txbd = &bp->tx_desc_ring[ring_prod];
4285
4286 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4287 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4288 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4289 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4290
4291 last_frag = skb_shinfo(skb)->nr_frags;
4292
4293 for (i = 0; i < last_frag; i++) {
4294 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4295
4296 prod = NEXT_TX_BD(prod);
4297 ring_prod = TX_RING_IDX(prod);
4298 txbd = &bp->tx_desc_ring[ring_prod];
4299
4300 len = frag->size;
4301 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4302 len, PCI_DMA_TODEVICE);
4303 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4304 mapping, mapping);
4305
4306 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4307 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4308 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4309 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4310
4311 }
4312 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4313
4314 prod = NEXT_TX_BD(prod);
4315 bp->tx_prod_bseq += skb->len;
4316
Michael Chanb6016b72005-05-26 13:03:09 -07004317 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4318 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4319
4320 mmiowb();
4321
4322 bp->tx_prod = prod;
4323 dev->trans_start = jiffies;
4324
Michael Chane89bbf12005-08-25 15:36:58 -07004325 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chanc770a652005-08-25 15:38:39 -07004326 spin_lock(&bp->tx_lock);
Michael Chane89bbf12005-08-25 15:36:58 -07004327 netif_stop_queue(dev);
4328
4329 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4330 netif_wake_queue(dev);
Michael Chanc770a652005-08-25 15:38:39 -07004331 spin_unlock(&bp->tx_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004332 }
4333
4334 return NETDEV_TX_OK;
4335}
4336
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code according to wake-on-LAN
	 * capability and the current WOL setting.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Put the chip in a low power state while the interface is down. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4372
/* Combine the _hi/_lo halves of a 64-bit hardware counter into an
 * unsigned long.  The expansion is fully parenthesized so the macro
 * is safe inside larger expressions (the original expansion ended in
 * an unparenthesized '+').
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) (ctr##_hi) << 32) +			\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of a counter fit in a long. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4385
/* Fill the generic net_device_stats from the chip's statistics block.
 * On 32-bit hosts only the low 32 bits of each 64-bit counter are
 * reported (see GET_NET_STATS).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No stats block yet: return the cached copy unchanged. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* Aggregate RX error count from the components gathered above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are zeroed on 5706 and 5708 A0 due to
	 * chip errata (see the errata note above bnx2_5706_stats_len_arr).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	return net_stats;
}
4457
4458/* All ethtool functions called with rtnl_lock */
4459
4460static int
4461bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4462{
Michael Chan972ec0d2006-01-23 16:12:43 -08004463 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004464
4465 cmd->supported = SUPPORTED_Autoneg;
4466 if (bp->phy_flags & PHY_SERDES_FLAG) {
4467 cmd->supported |= SUPPORTED_1000baseT_Full |
4468 SUPPORTED_FIBRE;
4469
4470 cmd->port = PORT_FIBRE;
4471 }
4472 else {
4473 cmd->supported |= SUPPORTED_10baseT_Half |
4474 SUPPORTED_10baseT_Full |
4475 SUPPORTED_100baseT_Half |
4476 SUPPORTED_100baseT_Full |
4477 SUPPORTED_1000baseT_Full |
4478 SUPPORTED_TP;
4479
4480 cmd->port = PORT_TP;
4481 }
4482
4483 cmd->advertising = bp->advertising;
4484
4485 if (bp->autoneg & AUTONEG_SPEED) {
4486 cmd->autoneg = AUTONEG_ENABLE;
4487 }
4488 else {
4489 cmd->autoneg = AUTONEG_DISABLE;
4490 }
4491
4492 if (netif_carrier_ok(dev)) {
4493 cmd->speed = bp->line_speed;
4494 cmd->duplex = bp->duplex;
4495 }
4496 else {
4497 cmd->speed = -1;
4498 cmd->duplex = -1;
4499 }
4500
4501 cmd->transceiver = XCVR_INTERNAL;
4502 cmd->phy_address = bp->phy_addr;
4503
4504 return 0;
4505}
4506
/* ethtool set_settings: validate and apply speed/duplex/autoneg
 * settings, then renegotiate.  Called with rtnl_lock held.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so a validation failure leaves bp intact. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are not valid on SerDes. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise every speed the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: SerDes only supports 1000/full, and copper
		 * cannot be forced to 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	/* Apply the new settings under phy_lock. */
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4578
4579static void
4580bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4581{
Michael Chan972ec0d2006-01-23 16:12:43 -08004582 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004583
4584 strcpy(info->driver, DRV_MODULE_NAME);
4585 strcpy(info->version, DRV_MODULE_VERSION);
4586 strcpy(info->bus_info, pci_name(bp->pdev));
4587 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4588 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4589 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004590 info->fw_version[1] = info->fw_version[3] = '.';
4591 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004592}
4593
Michael Chan244ac4f2006-03-20 17:48:46 -08004594#define BNX2_REGDUMP_LEN (32 * 1024)
4595
/* ethtool get_regs_len: size of the buffer bnx2_get_regs() fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4601
4602static void
4603bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4604{
4605 u32 *p = _p, i, offset;
4606 u8 *orig_p = _p;
4607 struct bnx2 *bp = netdev_priv(dev);
4608 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4609 0x0800, 0x0880, 0x0c00, 0x0c10,
4610 0x0c30, 0x0d08, 0x1000, 0x101c,
4611 0x1040, 0x1048, 0x1080, 0x10a4,
4612 0x1400, 0x1490, 0x1498, 0x14f0,
4613 0x1500, 0x155c, 0x1580, 0x15dc,
4614 0x1600, 0x1658, 0x1680, 0x16d8,
4615 0x1800, 0x1820, 0x1840, 0x1854,
4616 0x1880, 0x1894, 0x1900, 0x1984,
4617 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4618 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4619 0x2000, 0x2030, 0x23c0, 0x2400,
4620 0x2800, 0x2820, 0x2830, 0x2850,
4621 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4622 0x3c00, 0x3c94, 0x4000, 0x4010,
4623 0x4080, 0x4090, 0x43c0, 0x4458,
4624 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4625 0x4fc0, 0x5010, 0x53c0, 0x5444,
4626 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4627 0x5fc0, 0x6000, 0x6400, 0x6428,
4628 0x6800, 0x6848, 0x684c, 0x6860,
4629 0x6888, 0x6910, 0x8000 };
4630
4631 regs->version = 0;
4632
4633 memset(p, 0, BNX2_REGDUMP_LEN);
4634
4635 if (!netif_running(bp->dev))
4636 return;
4637
4638 i = 0;
4639 offset = reg_boundaries[0];
4640 p += offset;
4641 while (offset < BNX2_REGDUMP_LEN) {
4642 *p++ = REG_RD(bp, offset);
4643 offset += 4;
4644 if (offset == reg_boundaries[i + 1]) {
4645 offset = reg_boundaries[i + 2];
4646 p = (u32 *) (orig_p + offset);
4647 i += 2;
4648 }
4649 }
4650}
4651
Michael Chanb6016b72005-05-26 13:03:09 -07004652static void
4653bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4654{
Michael Chan972ec0d2006-01-23 16:12:43 -08004655 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004656
4657 if (bp->flags & NO_WOL_FLAG) {
4658 wol->supported = 0;
4659 wol->wolopts = 0;
4660 }
4661 else {
4662 wol->supported = WAKE_MAGIC;
4663 if (bp->wol)
4664 wol->wolopts = WAKE_MAGIC;
4665 else
4666 wol->wolopts = 0;
4667 }
4668 memset(&wol->sopass, 0, sizeof(wol->sopass));
4669}
4670
4671static int
4672bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4673{
Michael Chan972ec0d2006-01-23 16:12:43 -08004674 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004675
4676 if (wol->wolopts & ~WAKE_MAGIC)
4677 return -EINVAL;
4678
4679 if (wol->wolopts & WAKE_MAGIC) {
4680 if (bp->flags & NO_WOL_FLAG)
4681 return -EINVAL;
4682
4683 bp->wol = 1;
4684 }
4685 else {
4686 bp->wol = 0;
4687 }
4688 return 0;
4689}
4690
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * speed autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while the forced link-down settles. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* NOTE(review): arms the SerDes autoneg timeout —
			 * presumably so the timer can intervene if
			 * negotiation stalls; confirm against the timer
			 * handler.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4726
4727static int
4728bnx2_get_eeprom_len(struct net_device *dev)
4729{
Michael Chan972ec0d2006-01-23 16:12:43 -08004730 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004731
Michael Chan1122db72006-01-23 16:11:42 -08004732 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004733 return 0;
4734
Michael Chan1122db72006-01-23 16:11:42 -08004735 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004736}
4737
4738static int
4739bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4740 u8 *eebuf)
4741{
Michael Chan972ec0d2006-01-23 16:12:43 -08004742 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004743 int rc;
4744
John W. Linville1064e942005-11-10 12:58:24 -08004745 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004746
4747 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4748
4749 return rc;
4750}
4751
4752static int
4753bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4754 u8 *eebuf)
4755{
Michael Chan972ec0d2006-01-23 16:12:43 -08004756 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004757 int rc;
4758
John W. Linville1064e942005-11-10 12:58:24 -08004759 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004760
4761 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4762
4763 return rc;
4764}
4765
4766static int
4767bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4768{
Michael Chan972ec0d2006-01-23 16:12:43 -08004769 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004770
4771 memset(coal, 0, sizeof(struct ethtool_coalesce));
4772
4773 coal->rx_coalesce_usecs = bp->rx_ticks;
4774 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4775 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4776 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4777
4778 coal->tx_coalesce_usecs = bp->tx_ticks;
4779 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4780 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4781 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4782
4783 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4784
4785 return 0;
4786}
4787
4788static int
4789bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4790{
Michael Chan972ec0d2006-01-23 16:12:43 -08004791 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004792
4793 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4794 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4795
4796 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4797 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4798
4799 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4800 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4801
4802 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4803 if (bp->rx_quick_cons_trip_int > 0xff)
4804 bp->rx_quick_cons_trip_int = 0xff;
4805
4806 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4807 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4808
4809 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4810 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4811
4812 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4813 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4814
4815 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4816 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4817 0xff;
4818
4819 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4820 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4821 bp->stats_ticks &= 0xffff00;
4822
4823 if (netif_running(bp->dev)) {
4824 bnx2_netif_stop(bp);
4825 bnx2_init_nic(bp);
4826 bnx2_netif_start(bp);
4827 }
4828
4829 return 0;
4830}
4831
4832static void
4833bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4834{
Michael Chan972ec0d2006-01-23 16:12:43 -08004835 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004836
Michael Chan13daffa2006-03-20 17:49:20 -08004837 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07004838 ering->rx_mini_max_pending = 0;
4839 ering->rx_jumbo_max_pending = 0;
4840
4841 ering->rx_pending = bp->rx_ring_size;
4842 ering->rx_mini_pending = 0;
4843 ering->rx_jumbo_pending = 0;
4844
4845 ering->tx_max_pending = MAX_TX_DESC_CNT;
4846 ering->tx_pending = bp->tx_ring_size;
4847}
4848
/* ethtool set_ringparam: resize the RX/TX rings, tearing the device
 * down and re-initializing it if it is running.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Reject out-of-range sizes; the TX ring must hold at least one
	 * maximally-fragmented skb.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its rings freed while still marked running —
		 * confirm whether the interface should be closed here
		 * instead of just returning the error.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
4882
4883static void
4884bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4885{
Michael Chan972ec0d2006-01-23 16:12:43 -08004886 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004887
4888 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4889 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4890 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4891}
4892
4893static int
4894bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4895{
Michael Chan972ec0d2006-01-23 16:12:43 -08004896 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004897
4898 bp->req_flow_ctrl = 0;
4899 if (epause->rx_pause)
4900 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4901 if (epause->tx_pause)
4902 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4903
4904 if (epause->autoneg) {
4905 bp->autoneg |= AUTONEG_FLOW_CTRL;
4906 }
4907 else {
4908 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4909 }
4910
Michael Chanc770a652005-08-25 15:38:39 -07004911 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004912
4913 bnx2_setup_phy(bp);
4914
Michael Chanc770a652005-08-25 15:38:39 -07004915 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004916
4917 return 0;
4918}
4919
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
4927
/* ethtool set_rx_csum: enable/disable RX checksum offload.  Takes
 * effect on subsequently received packets; never fails.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
4936
#define BNX2_NUM_STATS 45

/* ethtool stat names; entry order must match bnx2_stats_offset_arr
 * and the per-chip stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
};
4988
/* Offset of a counter within the statistics block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* One entry per ethtool stat, in bnx2_stats_str_arr order.  64-bit
 * counters point at their _hi half.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
5038
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-stat counter width in bytes (8 or 4); 0 marks a counter skipped
 * due to the errata above.  Table for 5706 and 5708 A0.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};

/* Same table for later 5708 steppings; carrier sense errors are valid. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
5057
#define BNX2_NUM_TESTS 6

/* Self-test names reported via get_strings(ETH_SS_TEST); order must
 * match the buf[] indices set in bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5070
/* ethtool self_test_count: number of entries bnx2_self_test() fills. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5076
/* ethtool self_test: run the on-line tests, plus the traffic-
 * disrupting off-line tests when requested.  A nonzero buf[i] marks
 * test i as failed; indices match bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Off-line tests need the chip quiesced in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns its own error code; report it directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* Copper links may take longer to come back up. */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5128
5129static void
5130bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5131{
5132 switch (stringset) {
5133 case ETH_SS_STATS:
5134 memcpy(buf, bnx2_stats_str_arr,
5135 sizeof(bnx2_stats_str_arr));
5136 break;
5137 case ETH_SS_TEST:
5138 memcpy(buf, bnx2_tests_str_arr,
5139 sizeof(bnx2_tests_str_arr));
5140 break;
5141 }
5142}
5143
/* ethtool get_stats_count hook: number of u64 statistics that
 * bnx2_get_ethtool_stats() reports. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5149
/* ethtool get_ethtool_stats hook.  Decodes the DMA'd hardware
 * statistics block into u64 values using a per-chip width table:
 * entries of width 0 are unsupported counters (reported as 0), width 4
 * are single 32-bit words, and width 8 are 64-bit counters stored as
 * two consecutive 32-bit words (high word first).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block not allocated yet (device never opened). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early steppings lack some counters; pick the matching table. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5190
/* ethtool phys_id hook: identify the port by blinking its LEDs.
 * Alternates the LED override state every 500ms for 'data' seconds
 * (default 2s when data == 0), then restores the saved LED mode.
 * Interruptible by a pending signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs; restore 'save' on exit. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Each second consists of one "off" and one "on" half-cycle. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5224
/* ethtool operations table; hooked up to the net_device in
 * bnx2_init_one().  TSO get/set is only compiled in when the kernel
 * supports it (BCM_TSO). */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5262
/* Called with rtnl_lock */
/* Handles the MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG); MDIO
 * access is serialized by bp->phy_lock.  SIOCGMIIPHY deliberately
 * falls through to SIOCGMIIREG so a single call returns both the PHY
 * address and a register value.
 * NOTE(review): PHY reads/writes are issued even when the interface is
 * down — presumably the chip is still accessible then; confirm, as
 * later drivers return -EAGAIN in that case.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5304
5305/* Called with rtnl_lock */
5306static int
5307bnx2_change_mac_addr(struct net_device *dev, void *p)
5308{
5309 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005310 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005311
Michael Chan73eef4c2005-08-25 15:39:15 -07005312 if (!is_valid_ether_addr(addr->sa_data))
5313 return -EINVAL;
5314
Michael Chanb6016b72005-05-26 13:03:09 -07005315 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5316 if (netif_running(dev))
5317 bnx2_set_mac_addr(bp);
5318
5319 return 0;
5320}
5321
5322/* Called with rtnl_lock */
5323static int
5324bnx2_change_mtu(struct net_device *dev, int new_mtu)
5325{
Michael Chan972ec0d2006-01-23 16:12:43 -08005326 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005327
5328 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5329 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5330 return -EINVAL;
5331
5332 dev->mtu = new_mtu;
5333 if (netif_running(dev)) {
5334 bnx2_netif_stop(bp);
5335
5336 bnx2_init_nic(bp);
5337
5338 bnx2_netif_start(bp);
5339 }
5340 return 0;
5341}
5342
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll/netconsole hook: run the interrupt handler synchronously
 * with the device IRQ masked, so packets can be processed when normal
 * interrupt delivery is unavailable. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5354
5355static int __devinit
5356bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5357{
5358 struct bnx2 *bp;
5359 unsigned long mem_len;
5360 int rc;
5361 u32 reg;
5362
5363 SET_MODULE_OWNER(dev);
5364 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005365 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005366
5367 bp->flags = 0;
5368 bp->phy_flags = 0;
5369
5370 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5371 rc = pci_enable_device(pdev);
5372 if (rc) {
5373 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5374 goto err_out;
5375 }
5376
5377 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5378 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5379 "aborting.\n");
5380 rc = -ENODEV;
5381 goto err_out_disable;
5382 }
5383
5384 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5385 if (rc) {
5386 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5387 goto err_out_disable;
5388 }
5389
5390 pci_set_master(pdev);
5391
5392 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5393 if (bp->pm_cap == 0) {
5394 printk(KERN_ERR PFX "Cannot find power management capability, "
5395 "aborting.\n");
5396 rc = -EIO;
5397 goto err_out_release;
5398 }
5399
5400 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5401 if (bp->pcix_cap == 0) {
5402 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5403 rc = -EIO;
5404 goto err_out_release;
5405 }
5406
5407 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5408 bp->flags |= USING_DAC_FLAG;
5409 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5410 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5411 "failed, aborting.\n");
5412 rc = -EIO;
5413 goto err_out_release;
5414 }
5415 }
5416 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5417 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5418 rc = -EIO;
5419 goto err_out_release;
5420 }
5421
5422 bp->dev = dev;
5423 bp->pdev = pdev;
5424
5425 spin_lock_init(&bp->phy_lock);
5426 spin_lock_init(&bp->tx_lock);
5427 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5428
5429 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5430 mem_len = MB_GET_CID_ADDR(17);
5431 dev->mem_end = dev->mem_start + mem_len;
5432 dev->irq = pdev->irq;
5433
5434 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5435
5436 if (!bp->regview) {
5437 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5438 rc = -ENOMEM;
5439 goto err_out_release;
5440 }
5441
5442 /* Configure byte swap and enable write to the reg_window registers.
5443 * Rely on CPU to do target byte swapping on big endian systems
5444 * The chip's target access swapping will not swap all accesses
5445 */
5446 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5447 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5448 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5449
Pavel Machek829ca9a2005-09-03 15:56:56 -07005450 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005451
5452 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5453
Michael Chanb6016b72005-05-26 13:03:09 -07005454 /* Get bus information. */
5455 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5456 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5457 u32 clkreg;
5458
5459 bp->flags |= PCIX_FLAG;
5460
5461 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5462
5463 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5464 switch (clkreg) {
5465 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5466 bp->bus_speed_mhz = 133;
5467 break;
5468
5469 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5470 bp->bus_speed_mhz = 100;
5471 break;
5472
5473 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5474 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5475 bp->bus_speed_mhz = 66;
5476 break;
5477
5478 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5479 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5480 bp->bus_speed_mhz = 50;
5481 break;
5482
5483 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5484 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5485 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5486 bp->bus_speed_mhz = 33;
5487 break;
5488 }
5489 }
5490 else {
5491 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5492 bp->bus_speed_mhz = 66;
5493 else
5494 bp->bus_speed_mhz = 33;
5495 }
5496
5497 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5498 bp->flags |= PCI_32BIT_FLAG;
5499
5500 /* 5706A0 may falsely detect SERR and PERR. */
5501 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5502 reg = REG_RD(bp, PCI_COMMAND);
5503 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5504 REG_WR(bp, PCI_COMMAND, reg);
5505 }
5506 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5507 !(bp->flags & PCIX_FLAG)) {
5508
5509 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5510 "aborting.\n");
5511 goto err_out_unmap;
5512 }
5513
5514 bnx2_init_nvram(bp);
5515
Michael Chane3648b32005-11-04 08:51:21 -08005516 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5517
5518 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5519 BNX2_SHM_HDR_SIGNATURE_SIG)
5520 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5521 else
5522 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5523
Michael Chanb6016b72005-05-26 13:03:09 -07005524 /* Get the permanent MAC address. First we need to make sure the
5525 * firmware is actually running.
5526 */
Michael Chane3648b32005-11-04 08:51:21 -08005527 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005528
5529 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5530 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5531 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5532 rc = -ENODEV;
5533 goto err_out_unmap;
5534 }
5535
Michael Chane3648b32005-11-04 08:51:21 -08005536 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005537
Michael Chane3648b32005-11-04 08:51:21 -08005538 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005539 bp->mac_addr[0] = (u8) (reg >> 8);
5540 bp->mac_addr[1] = (u8) reg;
5541
Michael Chane3648b32005-11-04 08:51:21 -08005542 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005543 bp->mac_addr[2] = (u8) (reg >> 24);
5544 bp->mac_addr[3] = (u8) (reg >> 16);
5545 bp->mac_addr[4] = (u8) (reg >> 8);
5546 bp->mac_addr[5] = (u8) reg;
5547
5548 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan13daffa2006-03-20 17:49:20 -08005549 bnx2_set_rx_ring_size(bp, 100);
Michael Chanb6016b72005-05-26 13:03:09 -07005550
5551 bp->rx_csum = 1;
5552
5553 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5554
5555 bp->tx_quick_cons_trip_int = 20;
5556 bp->tx_quick_cons_trip = 20;
5557 bp->tx_ticks_int = 80;
5558 bp->tx_ticks = 80;
5559
5560 bp->rx_quick_cons_trip_int = 6;
5561 bp->rx_quick_cons_trip = 6;
5562 bp->rx_ticks_int = 18;
5563 bp->rx_ticks = 18;
5564
5565 bp->stats_ticks = 1000000 & 0xffff00;
5566
5567 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005568 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005569
Michael Chan5b0c76a2005-11-04 08:45:49 -08005570 bp->phy_addr = 1;
5571
Michael Chanb6016b72005-05-26 13:03:09 -07005572 /* Disable WOL support if we are running on a SERDES chip. */
5573 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5574 bp->phy_flags |= PHY_SERDES_FLAG;
5575 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005576 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5577 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005578 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005579 BNX2_SHARED_HW_CFG_CONFIG);
5580 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5581 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5582 }
Michael Chanb6016b72005-05-26 13:03:09 -07005583 }
5584
Michael Chandda1e392006-01-23 16:08:14 -08005585 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5586 bp->flags |= NO_WOL_FLAG;
5587
Michael Chanb6016b72005-05-26 13:03:09 -07005588 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5589 bp->tx_quick_cons_trip_int =
5590 bp->tx_quick_cons_trip;
5591 bp->tx_ticks_int = bp->tx_ticks;
5592 bp->rx_quick_cons_trip_int =
5593 bp->rx_quick_cons_trip;
5594 bp->rx_ticks_int = bp->rx_ticks;
5595 bp->comp_prod_trip_int = bp->comp_prod_trip;
5596 bp->com_ticks_int = bp->com_ticks;
5597 bp->cmd_ticks_int = bp->cmd_ticks;
5598 }
5599
5600 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5601 bp->req_line_speed = 0;
5602 if (bp->phy_flags & PHY_SERDES_FLAG) {
5603 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005604
Michael Chane3648b32005-11-04 08:51:21 -08005605 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005606 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5607 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5608 bp->autoneg = 0;
5609 bp->req_line_speed = bp->line_speed = SPEED_1000;
5610 bp->req_duplex = DUPLEX_FULL;
5611 }
Michael Chanb6016b72005-05-26 13:03:09 -07005612 }
5613 else {
5614 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5615 }
5616
5617 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5618
Michael Chancd339a02005-08-25 15:35:24 -07005619 init_timer(&bp->timer);
5620 bp->timer.expires = RUN_AT(bp->timer_interval);
5621 bp->timer.data = (unsigned long) bp;
5622 bp->timer.function = bnx2_timer;
5623
Michael Chanb6016b72005-05-26 13:03:09 -07005624 return 0;
5625
5626err_out_unmap:
5627 if (bp->regview) {
5628 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005629 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005630 }
5631
5632err_out_release:
5633 pci_release_regions(pdev);
5634
5635err_out_disable:
5636 pci_disable_device(pdev);
5637 pci_set_drvdata(pdev, NULL);
5638
5639err_out:
5640 return rc;
5641}
5642
5643static int __devinit
5644bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5645{
5646 static int version_printed = 0;
5647 struct net_device *dev = NULL;
5648 struct bnx2 *bp;
5649 int rc, i;
5650
5651 if (version_printed++ == 0)
5652 printk(KERN_INFO "%s", version);
5653
5654 /* dev zeroed in init_etherdev */
5655 dev = alloc_etherdev(sizeof(*bp));
5656
5657 if (!dev)
5658 return -ENOMEM;
5659
5660 rc = bnx2_init_board(pdev, dev);
5661 if (rc < 0) {
5662 free_netdev(dev);
5663 return rc;
5664 }
5665
5666 dev->open = bnx2_open;
5667 dev->hard_start_xmit = bnx2_start_xmit;
5668 dev->stop = bnx2_close;
5669 dev->get_stats = bnx2_get_stats;
5670 dev->set_multicast_list = bnx2_set_rx_mode;
5671 dev->do_ioctl = bnx2_ioctl;
5672 dev->set_mac_address = bnx2_change_mac_addr;
5673 dev->change_mtu = bnx2_change_mtu;
5674 dev->tx_timeout = bnx2_tx_timeout;
5675 dev->watchdog_timeo = TX_TIMEOUT;
5676#ifdef BCM_VLAN
5677 dev->vlan_rx_register = bnx2_vlan_rx_register;
5678 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5679#endif
5680 dev->poll = bnx2_poll;
5681 dev->ethtool_ops = &bnx2_ethtool_ops;
5682 dev->weight = 64;
5683
Michael Chan972ec0d2006-01-23 16:12:43 -08005684 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005685
5686#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5687 dev->poll_controller = poll_bnx2;
5688#endif
5689
5690 if ((rc = register_netdev(dev))) {
5691 printk(KERN_ERR PFX "Cannot register net device\n");
5692 if (bp->regview)
5693 iounmap(bp->regview);
5694 pci_release_regions(pdev);
5695 pci_disable_device(pdev);
5696 pci_set_drvdata(pdev, NULL);
5697 free_netdev(dev);
5698 return rc;
5699 }
5700
5701 pci_set_drvdata(pdev, dev);
5702
5703 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005704 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005705 bp->name = board_info[ent->driver_data].name,
5706 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5707 "IRQ %d, ",
5708 dev->name,
5709 bp->name,
5710 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5711 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5712 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5713 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5714 bp->bus_speed_mhz,
5715 dev->base_addr,
5716 bp->pdev->irq);
5717
5718 printk("node addr ");
5719 for (i = 0; i < 6; i++)
5720 printk("%2.2x", dev->dev_addr[i]);
5721 printk("\n");
5722
5723 dev->features |= NETIF_F_SG;
5724 if (bp->flags & USING_DAC_FLAG)
5725 dev->features |= NETIF_F_HIGHDMA;
5726 dev->features |= NETIF_F_IP_CSUM;
5727#ifdef BCM_VLAN
5728 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5729#endif
5730#ifdef BCM_TSO
5731 dev->features |= NETIF_F_TSO;
5732#endif
5733
5734 netif_carrier_off(bp->dev);
5735
5736 return 0;
5737}
5738
/* PCI remove entry point: undo everything bnx2_init_one() did.
 * Pending reset work is flushed before unregistering so it cannot run
 * against a dying device. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5757
/* PM suspend hook: quiesce the NIC, tell the bootcode firmware why we
 * are going down (unload vs. suspend with/without WOL), and drop to
 * the requested low-power state.  No-op if the interface is down. */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Make sure a queued reset_task is not racing with suspend. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset message matching the WOL capability
	 * and configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
5783
/* PM resume hook: restore full power and re-initialize the NIC from
 * scratch (suspend freed all rings/buffers).  No-op if the interface
 * was down at suspend time. */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
5799
/* PCI driver descriptor tying the device ID table to the
 * probe/remove and power-management entry points above. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
5808
/* Module load: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}

/* Module unload: unregister the PCI driver (triggers remove for all
 * bound devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
5821
5822
5823