blob: eb0c4f1d448313fea0db44ab62d2c85602f64d4e [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

/* Schedule relative to the current jiffies count. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero disables MSI and falls back to INTx. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board identifiers; used as the driver_data index into board_info[]
 * and in the PCI device table below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;
88
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable adapter name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
102
/* PCI IDs this driver binds to.  The HP NC370x entries match on the
 * subsystem vendor/device, so they must precede the catch-all
 * PCI_ANY_ID entries for the same chip.  The last field (driver_data)
 * is a board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
122
/* NVRAM device descriptors.  Each entry's first field is the strap
 * value read from the hardware; the next four are controller
 * configuration words programmed for that device.  The remaining
 * fields give buffering mode, page geometry, address mask, total size
 * and a name.  NOTE(review): exact word meanings come from the
 * flash_spec definition in bnx2.h — these magic values are
 * device-specific and must not be altered.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
/* Return the number of free TX descriptors.
 *
 * The smp_mb() pairs the read of tx_prod/tx_cons against updates made
 * on other CPUs before this count is examined.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
229
Michael Chanb6016b72005-05-26 13:03:09 -0700230static u32
231bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
232{
233 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
235}
236
/* Indirect register write: latch @offset into the PCICFG window
 * address register, then write @val through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
243
244static void
245bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246{
247 offset += cid_addr;
Michael Chan59b47d82006-11-19 14:10:45 -0800248 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249 int i;
250
251 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254 for (i = 0; i < 5; i++) {
255 u32 val;
256 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258 break;
259 udelay(5);
260 }
261 } else {
262 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263 REG_WR(bp, BNX2_CTX_DATA, val);
264 }
Michael Chanb6016b72005-05-26 13:03:09 -0700265}
266
/* Read PHY register @reg over MDIO into *@val.
 *
 * If hardware auto-polling of the PHY is enabled it is temporarily
 * switched off so the manual MDIO command does not collide with it,
 * and restored before returning.
 *
 * Returns 0 on success; -EBUSY (with *val zeroed) if the MDIO
 * interface is still busy after 50 x 10us of polling.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Issue the read command: PHY address, register, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for the busy bit to clear, then re-read for the data. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
323
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is paused around the manual
 * MDIO command and restored afterwards.
 *
 * Returns 0 on success, or -EBUSY if the interface stays busy after
 * 50 x 10us of polling.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Issue the write command with the data in the low bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
372
/* Mask the device interrupt.  The read-back flushes the posted
 * write so masking has taken effect when this returns.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask the device interrupt.  The first write acknowledges events
 * up to last_status_idx while still masked; the second unmasks.  The
 * final HC_COMMAND write with COAL_NOW kicks the host coalescing
 * block so pending events generate an interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Mask the interrupt and wait for any in-flight handler to finish.
 * intr_sem is raised first (bnx2_netif_start() later decrements it);
 * synchronize_irq() blocks until the running ISR, if any, returns.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
401
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the TX queue.  trans_start is refreshed so the
 * watchdog does not see the stopped queue as a TX timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
412
413static void
414bnx2_netif_start(struct bnx2 *bp)
415{
416 if (atomic_dec_and_test(&bp->intr_sem)) {
417 if (netif_running(bp->dev)) {
418 netif_wake_queue(bp->dev);
419 netif_poll_enable(bp->dev);
420 bnx2_enable_int(bp);
421 }
422 }
423}
424
/* Release everything allocated by bnx2_alloc_mem().  Safe on a
 * partially allocated device: coherent blocks are checked before
 * freeing, and kfree()/vfree() accept NULL.  stats_blk lives inside
 * the status_blk allocation, so it is only cleared, never freed.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709-only context pages (ctx_pages is 0 on other chips). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
463
/* Allocate all rings, the status/statistics block and (5709 only)
 * the context pages.  On any failure the goto target unwinds via
 * bnx2_free_mem(), which tolerates partial allocation.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The software RX ring can be large (rx_max_ring pages of
	 * sw_bd), hence vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk points into the tail of the status_blk buffer. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
534
/* Encode the current link state (speed/duplex/autoneg result) into a
 * BNX2_LINK_STATUS_* word and post it into shared memory at
 * shmem_base + BNX2_LINK_STATUS.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice; the second read reflects
			 * the current (unlatched) state.
			 */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
590
/* Update the carrier state, log the link change (speed, duplex and
 * flow-control settings built up with continuation printk's), and
 * propagate the new state to shared memory via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
625
/* Resolve the effective flow-control setting into bp->flow_ctrl.
 *
 * If pause autonegotiation is not fully enabled, the requested
 * setting is applied directly (full duplex only).  On 5708 SerDes
 * the resolved result is read from the hardware status register.
 * Otherwise the local and link-partner advertisements are combined
 * per the IEEE 802.3 pause resolution table (see comment below).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* Map 1000BASE-X pause bits onto the common PAUSE_CAP/ASYM
	 * encoding so one resolution path serves both media types.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
701
/* Record link-up state for the 5708 SerDes PHY: speed and duplex are
 * read directly from the BCM5708S_1000X_STAT1 status register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
730
731static int
732bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700733{
734 u32 bmcr, local_adv, remote_adv, common;
735
736 bp->link_up = 1;
737 bp->line_speed = SPEED_1000;
738
739 bnx2_read_phy(bp, MII_BMCR, &bmcr);
740 if (bmcr & BMCR_FULLDPLX) {
741 bp->duplex = DUPLEX_FULL;
742 }
743 else {
744 bp->duplex = DUPLEX_HALF;
745 }
746
747 if (!(bmcr & BMCR_ANENABLE)) {
748 return 0;
749 }
750
751 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752 bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754 common = local_adv & remote_adv;
755 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757 if (common & ADVERTISE_1000XFULL) {
758 bp->duplex = DUPLEX_FULL;
759 }
760 else {
761 bp->duplex = DUPLEX_HALF;
762 }
763 }
764
765 return 0;
766}
767
/* Determine speed and duplex for a copper PHY.
 *
 * With autoneg enabled, the 1000BASE-T abilities are checked first
 * (MII_STAT1000 link-partner bits sit two positions above the
 * MII_CTRL1000 advertisement bits, hence the >> 2), then the
 * 100/10 abilities from ADVERTISE/LPA.  With autoneg disabled, the
 * forced BMCR speed/duplex bits are used.  If no common ability is
 * found, link_up is cleared.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: honor the forced BMCR settings. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
833
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * Finishes by acking the EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x26ff widens TX lengths for 1000HD; 0x2620 is the default. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
/* Re-evaluate link state after a link event.
 *
 * Reads (and for 5706 SerDes, overrides from EMAC_STATUS) the BMSR
 * link bit, dispatches to the appropriate per-PHY linkup handler,
 * resolves flow control, logs a change via bnx2_report_link(), and
 * reprograms the MAC.  In loopback mode the link is simply forced up.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice so the second read
	 * reflects the current state.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: trust the EMAC link status instead. */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link lost while autonegotiating: drop any
			 * forced 2.5G mode and re-enable autoneg.
			 */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
967
968static int
969bnx2_reset_phy(struct bnx2 *bp)
970{
971 int i;
972 u32 reg;
973
974 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
975
976#define PHY_RESET_MAX_WAIT 100
977 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978 udelay(10);
979
980 bnx2_read_phy(bp, MII_BMCR, &reg);
981 if (!(reg & BMCR_RESET)) {
982 udelay(20);
983 break;
984 }
985 }
986 if (i == PHY_RESET_MAX_WAIT) {
987 return -EBUSY;
988 }
989 return 0;
990}
991
992static u32
993bnx2_phy_get_pause_adv(struct bnx2 *bp)
994{
995 u32 adv = 0;
996
997 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPAUSE;
1002 }
1003 else {
1004 adv = ADVERTISE_PAUSE_CAP;
1005 }
1006 }
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPSE_ASYM;
1010 }
1011 else {
1012 adv = ADVERTISE_PAUSE_ASYM;
1013 }
1014 }
1015 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018 }
1019 else {
1020 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021 }
1022 }
1023 return adv;
1024}
1025
/* Program a SerDes PHY according to the requested speed/duplex/autoneg
 * settings.  Handles the forced-speed case (including forced 2.5G on
 * the 5708S) and the autoneg case.  Must be called with bp->phy_lock
 * held; the lock is dropped briefly around the msleep() below.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed: disable autoneg, force 1G (or 2.5G) */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				/* 2.5G capability must be enabled; toggling
				 * it requires forcing the link down.
				 */
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				/* Leaving 2.5G mode also requires a link
				 * bounce.
				 */
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Clear the 1000X ads and restart autoneg so
				 * the partner sees the link drop before the
				 * forced mode is written.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path: make sure 2.5G is advertised when capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Sleep without holding the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
/* Advertisement masks: the ETHTOOL_* masks are in ethtool ADVERTISED_*
 * encoding, the PHY_* masks are in MII ADVERTISE_* register encoding.
 */
#define ETHTOOL_ALL_FIBRE_SPEED \
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Program a copper PHY according to the requested settings.  In
 * autoneg mode the advertisement registers are rewritten and autoneg
 * restarted only when something changed; in forced mode BMCR is
 * rewritten and the link is bounced if it was up.  Must be called
 * with bp->phy_lock held; the lock is dropped around the msleep()
 * below.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 + pause advertisement. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000Base-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from ethtool flags. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get
		 * the current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Sleep without holding the PHY lock so the
			 * partner has time to see the link drop.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237static int
1238bnx2_setup_phy(struct bnx2 *bp)
1239{
1240 if (bp->loopback == MAC_LOOPBACK)
1241 return 0;
1242
1243 if (bp->phy_flags & PHY_SERDES_FLAG) {
1244 return (bnx2_setup_serdes_phy(bp));
1245 }
1246 else {
1247 return (bnx2_setup_copper_phy(bp));
1248 }
1249}
1250
/* One-time initialization of the 5708 SerDes PHY.  The PHY registers
 * are banked: BCM5708S_BLK_ADDR selects the active block before the
 * registers in that block are accessed.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use IEEE register layout in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Enable fiber mode and speed auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory config;
	 * applied only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time initialization of the 5706 SerDes PHY.  Registers 0x18 and
 * 0x1c are vendor shadow registers; the write/read/write sequences
 * select a shadow page and update it.  The magic values differ for
 * jumbo (mtu > 1500) vs standard frames.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time initialization of a copper PHY: optional CRC workaround,
 * optional early-DAC disable, extended packet length for jumbo
 * frames, and ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18 are
 * vendor (shadow) registers.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* Vendor-specified register sequence to work around a CRC
	 * problem on affected boards (PHY_CRC_FIX_FLAG).
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of the DSP expand register to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Reset and initialize the PHY: enable the link attention, read the
 * PHY ID, run the chip-specific init routine, then apply the current
 * link settings.  Returns the status of the chip-specific init (0 on
 * success); the bnx2_setup_phy() result is not propagated.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Interrupt on link-ready changes. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1422
1423static int
1424bnx2_set_mac_loopback(struct bnx2 *bp)
1425{
1426 u32 mac_mode;
1427
1428 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432 bp->link_up = 1;
1433 return 0;
1434}
1435
Michael Chanbc5a0692006-01-23 16:13:22 -08001436static int bnx2_test_link(struct bnx2 *);
1437
/* Put the PHY into loopback at forced 1G full duplex, wait up to
 * ~1 second for the link to come up, then configure the EMAC for
 * GMII.  Returns 0 on success or the bnx2_write_phy() error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access requires the PHY lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link, 10 x 100ms max. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/forced-link/duplex bits, select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1467
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for it to be acknowledged.
 *
 * @msg_data: message code/data; a sequence number is OR'd in here.
 * @silent:   suppress the timeout error printk when non-zero.
 *
 * Returns 0 on success (or when the message only requests WAIT0
 * semantics), -EBUSY on ack timeout, -EIO when the firmware reports
 * a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes the sequence number in its ack. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1510
/* 5709: enable the context memory and load the host page table with
 * the DMA addresses of the pre-allocated context blocks
 * (bp->ctx_blk_mapping).  Each page-table write is polled until the
 * WRITE_REQ bit self-clears.  Returns 0 on success, -EBUSY if a
 * write does not complete within ~50us.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable + mem-init command; page size is encoded relative to 256B. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page address + valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		/* High 32 bits. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		/* Kick off the page-table entry write for index i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1544
Michael Chanb6016b72005-05-26 13:03:09 -07001545static void
1546bnx2_init_context(struct bnx2 *bp)
1547{
1548 u32 vcid;
1549
1550 vcid = 96;
1551 while (vcid) {
1552 u32 vcid_addr, pcid_addr, offset;
1553
1554 vcid--;
1555
1556 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1557 u32 new_vcid;
1558
1559 vcid_addr = GET_PCID_ADDR(vcid);
1560 if (vcid & 0x8) {
1561 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1562 }
1563 else {
1564 new_vcid = vcid;
1565 }
1566 pcid_addr = GET_PCID_ADDR(new_vcid);
1567 }
1568 else {
1569 vcid_addr = GET_CID_ADDR(vcid);
1570 pcid_addr = vcid_addr;
1571 }
1572
1573 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1574 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1575
1576 /* Zero out the context. */
1577 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1578 CTX_WR(bp, 0x00, offset, 0);
1579 }
1580
1581 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1582 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1583 }
1584}
1585
/* Work around bad on-chip rx buffer memory: drain the hardware mbuf
 * free pool by allocating every buffer, remember the good ones
 * (bit 9 clear), and free only those back — permanently retiring the
 * bad buffers.  Returns 0 on success, -ENOMEM if the temporary array
 * cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Room for every possible good buffer handle. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Free-command encoding: handle in both halves + valid bit. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1636
1637static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001638bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001639{
1640 u32 val;
1641 u8 *mac_addr = bp->dev->dev_addr;
1642
1643 val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001647 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001648 (mac_addr[4] << 8) | mac_addr[5];
1649
1650 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651}
1652
1653static inline int
1654bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1655{
1656 struct sk_buff *skb;
1657 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1658 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08001659 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07001660 unsigned long align;
1661
Michael Chan932f3772006-08-15 01:39:36 -07001662 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07001663 if (skb == NULL) {
1664 return -ENOMEM;
1665 }
1666
Michael Chan59b47d82006-11-19 14:10:45 -08001667 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1668 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07001669
Michael Chanb6016b72005-05-26 13:03:09 -07001670 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1671 PCI_DMA_FROMDEVICE);
1672
1673 rx_buf->skb = skb;
1674 pci_unmap_addr_set(rx_buf, mapping, mapping);
1675
1676 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1677 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1678
1679 bp->rx_prod_bseq += bp->rx_buf_use_size;
1680
1681 return 0;
1682}
1683
1684static void
1685bnx2_phy_int(struct bnx2 *bp)
1686{
1687 u32 new_link_state, old_link_state;
1688
1689 new_link_state = bp->status_blk->status_attn_bits &
1690 STATUS_ATTN_BITS_LINK_STATE;
1691 old_link_state = bp->status_blk->status_attn_bits_ack &
1692 STATUS_ATTN_BITS_LINK_STATE;
1693 if (new_link_state != old_link_state) {
1694 if (new_link_state) {
1695 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1696 STATUS_ATTN_BITS_LINK_STATE);
1697 }
1698 else {
1699 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1700 STATUS_ATTN_BITS_LINK_STATE);
1701 }
1702 bnx2_set_link(bp);
1703 }
1704}
1705
/* Reclaim completed tx buffer descriptors.  Walks the tx ring from
 * the software consumer to the hardware consumer index, unmapping and
 * freeing each skb, then wakes the tx queue if it was stopped and
 * enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware never reports a consumer index on the last BD of
	 * a page (that slot holds the next-page pointer); skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the last BD of this packet has not
			 * completed yet (signed 16-bit ring distance).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD following the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer index for new completions. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to avoid a wake/stop race. */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1793
/* Recycle an rx skb: hand the buffer at ring slot @cons back to the
 * hardware at producer slot @prod by moving its skb pointer, DMA
 * mapping and buffer-descriptor address.  Used when a packet is
 * dropped or was copied out, so no new allocation is needed.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header region that the CPU looked at back to the
	 * device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the DMA address into the producer's hardware descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1823
/* Receive path: process up to @budget completed rx descriptors.
 * For each packet: check the frame header status for errors, either
 * copy small packets into a fresh skb (jumbo MTU case), replenish the
 * ring and pass the full buffer up, or recycle the buffer on error /
 * allocation failure.  Finally publishes the new producer index and
 * byte sequence to the hardware.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the next-page slot at the end of each descriptor page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Make the frame header + start of payload visible to
		 * the CPU.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the packet data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes back to the hardware. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Ring replenished; hand the full buffer up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle the buffer
			 * and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it flagged the
		 * packet as TCP/UDP and reported no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1973
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced (MSI is never shared,
 * so no ownership check is needed before acking).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache line holding the status block before NAPI polls it. */
	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further ones; NAPI completion
	 * re-enables them later.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Hand the rest of the work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1996
/* INTx ISR.  May be called for a shared interrupt line, so it first
 * checks whether this device actually raised the interrupt before
 * acking and scheduling NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		/* No new status and INTA not asserted by us: not ours. */
		return IRQ_NONE;

	/* Ack and mask; NAPI completion re-enables interrupts. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2026
Michael Chanf4e418f2005-11-04 08:53:48 -08002027static inline int
2028bnx2_has_work(struct bnx2 *bp)
2029{
2030 struct status_block *sblk = bp->status_blk;
2031
2032 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2033 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2034 return 1;
2035
Michael Chandb8b2252007-03-28 14:17:36 -07002036 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2037 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
Michael Chanf4e418f2005-11-04 08:53:48 -08002038 return 1;
2039
2040 return 0;
2041}
2042
/* NAPI poll routine.  Handles link attentions, TX completions and up to
 * *budget RX packets, then re-arms interrupts when no work remains.
 * Returns 0 when polling is complete, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link-state attention pending (bit differs from its ack bit)? */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen; the rmb() keeps the
	 * bnx2_has_work() check below from being speculated ahead of it.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: a single ack with the last seen index
			 * re-enables interrupts.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with MASK_INT still set, then unmask.
		 * The two-write sequence is intentional.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* More work pending; ask to be polled again. */
	return 1;
}
2104
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag stripping) and the
 * RPM sort/multicast-hash registers from dev->flags and the multicast
 * list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (don't strip) VLAN tags only when no VLAN group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 8 CRC bits select one of 256 hash bits:
			 * top 3 pick the register, bottom 5 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC register when the mode actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then enable the sorter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2179
Michael Chanfba9fe92006-06-12 22:21:25 -07002180#define FW_BUF_SIZE 0x8000
2181
2182static int
2183bnx2_gunzip_init(struct bnx2 *bp)
2184{
2185 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2186 goto gunzip_nomem1;
2187
2188 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2189 goto gunzip_nomem2;
2190
2191 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2192 if (bp->strm->workspace == NULL)
2193 goto gunzip_nomem3;
2194
2195 return 0;
2196
2197gunzip_nomem3:
2198 kfree(bp->strm);
2199 bp->strm = NULL;
2200
2201gunzip_nomem2:
2202 vfree(bp->gunzip_buf);
2203 bp->gunzip_buf = NULL;
2204
2205gunzip_nomem1:
2206 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2207 "uncompression.\n", bp->dev->name);
2208 return -ENOMEM;
2209}
2210
2211static void
2212bnx2_gunzip_end(struct bnx2 *bp)
2213{
2214 kfree(bp->strm->workspace);
2215
2216 kfree(bp->strm);
2217 bp->strm = NULL;
2218
2219 if (bp->gunzip_buf) {
2220 vfree(bp->gunzip_buf);
2221 bp->gunzip_buf = NULL;
2222 }
2223}
2224
2225static int
2226bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2227{
2228 int n, rc;
2229
2230 /* check gzip header */
2231 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2232 return -EINVAL;
2233
2234 n = 10;
2235
2236#define FNAME 0x8
2237 if (zbuf[3] & FNAME)
2238 while ((zbuf[n++] != 0) && (n < len));
2239
2240 bp->strm->next_in = zbuf + n;
2241 bp->strm->avail_in = len - n;
2242 bp->strm->next_out = bp->gunzip_buf;
2243 bp->strm->avail_out = FW_BUF_SIZE;
2244
2245 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2246 if (rc != Z_OK)
2247 return rc;
2248
2249 rc = zlib_inflate(bp->strm, Z_FINISH);
2250
2251 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2252 *outbuf = bp->gunzip_buf;
2253
2254 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2255 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2256 bp->dev->name, bp->strm->msg);
2257
2258 zlib_inflateEnd(bp->strm);
2259
2260 if (rc == Z_STREAM_END)
2261 return 0;
2262
2263 return rc;
2264}
2265
/* Download RV2P (receive-path) microcode into processor 1 or 2, two
 * 32-bit words (one instruction) per iteration, then reset that
 * processor.  The processor is un-stalled later by other code.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* Write the high then low word of one instruction... */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* ...then commit it to instruction slot i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2298
Michael Chanaf3ee512006-11-19 14:09:25 -08002299static int
Michael Chanb6016b72005-05-26 13:03:09 -07002300load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2301{
2302 u32 offset;
2303 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002304 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002305
2306 /* Halt the CPU. */
2307 val = REG_RD_IND(bp, cpu_reg->mode);
2308 val |= cpu_reg->mode_value_halt;
2309 REG_WR_IND(bp, cpu_reg->mode, val);
2310 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2311
2312 /* Load the Text area. */
2313 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002314 if (fw->gz_text) {
2315 u32 text_len;
2316 void *text;
2317
2318 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2319 &text_len);
2320 if (rc)
2321 return rc;
2322
2323 fw->text = text;
2324 }
2325 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002326 int j;
2327
2328 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002329 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002330 }
2331 }
2332
2333 /* Load the Data area. */
2334 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2335 if (fw->data) {
2336 int j;
2337
2338 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2339 REG_WR_IND(bp, offset, fw->data[j]);
2340 }
2341 }
2342
2343 /* Load the SBSS area. */
2344 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2345 if (fw->sbss) {
2346 int j;
2347
2348 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2349 REG_WR_IND(bp, offset, fw->sbss[j]);
2350 }
2351 }
2352
2353 /* Load the BSS area. */
2354 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2355 if (fw->bss) {
2356 int j;
2357
2358 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2359 REG_WR_IND(bp, offset, fw->bss[j]);
2360 }
2361 }
2362
2363 /* Load the Read-Only area. */
2364 offset = cpu_reg->spad_base +
2365 (fw->rodata_addr - cpu_reg->mips_view_base);
2366 if (fw->rodata) {
2367 int j;
2368
2369 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2370 REG_WR_IND(bp, offset, fw->rodata[j]);
2371 }
2372 }
2373
2374 /* Clear the pre-fetch instruction. */
2375 REG_WR_IND(bp, cpu_reg->inst, 0);
2376 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2377
2378 /* Start the CPU. */
2379 val = REG_RD_IND(bp, cpu_reg->mode);
2380 val &= ~cpu_reg->mode_value_halt;
2381 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2382 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002383
2384 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002385}
2386
/* Decompress and download firmware to all on-chip processors: both
 * RV2P engines, then the RX, TX, TX-patchup, completion and (5709
 * only) command processors.  Chooses the 5709 or 5706/5708 firmware
 * image per processor based on CHIP_NUM.  Returns 0 or the first
 * decompression/download error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the shared gunzip buffer/stream used by all downloads. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* The command processor firmware exists only for the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2531
/* Move the device to PCI power state D0 or D3hot via the PM capability
 * registers.  For D3hot with Wake-on-LAN enabled, the MAC is first
 * reconfigured (10/100 autoneg, magic/ACPI packet reception, multicast
 * accept) so the chip can wake the system.  Returns 0 or -EINVAL for
 * unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (-> D0) and the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WOL-specific EMAC/RPM settings from D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the
			 * low-power link, then restore the settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast + multicast; disable,
			 * program, then enable the sorter.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits 3) when WOL is
		 * on; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2658
2659static int
2660bnx2_acquire_nvram_lock(struct bnx2 *bp)
2661{
2662 u32 val;
2663 int j;
2664
2665 /* Request access to the flash interface. */
2666 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2667 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2668 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2669 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2670 break;
2671
2672 udelay(5);
2673 }
2674
2675 if (j >= NVRAM_TIMEOUT_COUNT)
2676 return -EBUSY;
2677
2678 return 0;
2679}
2680
2681static int
2682bnx2_release_nvram_lock(struct bnx2 *bp)
2683{
2684 int j;
2685 u32 val;
2686
2687 /* Relinquish nvram interface. */
2688 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2689
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2692 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2693 break;
2694
2695 udelay(5);
2696 }
2697
2698 if (j >= NVRAM_TIMEOUT_COUNT)
2699 return -EBUSY;
2700
2701 return 0;
2702}
2703
2704
/* Enable writes to the NVRAM.  For non-buffered flash an explicit
 * WREN command must also be issued and polled for completion.
 * Returns 0 on success or -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		/* Poll for command completion. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2733
2734static void
2735bnx2_disable_nvram_write(struct bnx2 *bp)
2736{
2737 u32 val;
2738
2739 val = REG_RD(bp, BNX2_MISC_CFG);
2740 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2741}
2742
2743
2744static void
2745bnx2_enable_nvram_access(struct bnx2 *bp)
2746{
2747 u32 val;
2748
2749 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002751 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002752 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2753}
2754
2755static void
2756bnx2_disable_nvram_access(struct bnx2 *bp)
2757{
2758 u32 val;
2759
2760 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2761 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002762 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07002763 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2764 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2765}
2766
/* Erase the flash page containing @offset.  A no-op for buffered
 * flash, which erases transparently.  Returns 0 on success or -EBUSY
 * if the erase command does not complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2806
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian byte order).  @cmd_flags carries FIRST/LAST framing bits
 * for multi-word transactions.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by page number + page offset. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Store the word in big-endian byte order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2852
2853
/* Write one 32-bit word from @val (big-endian byte order) to NVRAM at
 * @offset.  @cmd_flags carries FIRST/LAST framing bits for multi-word
 * transactions.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by page number + page offset. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Present the word to the hardware in big-endian byte order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2897
/* Identify the NVRAM (flash/EEPROM) device attached to the chip and
 * point bp->flash_info at the matching flash_table[] entry.
 *
 * If the interface has not already been reconfigured (bit 30 of
 * NVM_CFG1 clear), the matching entry's CFG1-CFG3/WRITE1 values are
 * programmed into the controller under the NVRAM lock.  The usable
 * flash size is taken from firmware shared memory when non-zero,
 * otherwise from the table entry.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the strap bits a previous
			 * reconfiguration left in CFG1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which set of strap pins is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size the firmware reports in shared memory;
	 * fall back to the table entry's total_size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2975
2976static int
2977bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2978 int buf_size)
2979{
2980 int rc = 0;
2981 u32 cmd_flags, offset32, len32, extra;
2982
2983 if (buf_size == 0)
2984 return 0;
2985
2986 /* Request access to the flash interface. */
2987 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2988 return rc;
2989
2990 /* Enable access to flash interface */
2991 bnx2_enable_nvram_access(bp);
2992
2993 len32 = buf_size;
2994 offset32 = offset;
2995 extra = 0;
2996
2997 cmd_flags = 0;
2998
2999 if (offset32 & 3) {
3000 u8 buf[4];
3001 u32 pre_len;
3002
3003 offset32 &= ~3;
3004 pre_len = 4 - (offset & 3);
3005
3006 if (pre_len >= len32) {
3007 pre_len = len32;
3008 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3009 BNX2_NVM_COMMAND_LAST;
3010 }
3011 else {
3012 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3013 }
3014
3015 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3016
3017 if (rc)
3018 return rc;
3019
3020 memcpy(ret_buf, buf + (offset & 3), pre_len);
3021
3022 offset32 += 4;
3023 ret_buf += pre_len;
3024 len32 -= pre_len;
3025 }
3026 if (len32 & 3) {
3027 extra = 4 - (len32 & 3);
3028 len32 = (len32 + 4) & ~3;
3029 }
3030
3031 if (len32 == 4) {
3032 u8 buf[4];
3033
3034 if (cmd_flags)
3035 cmd_flags = BNX2_NVM_COMMAND_LAST;
3036 else
3037 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3038 BNX2_NVM_COMMAND_LAST;
3039
3040 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3041
3042 memcpy(ret_buf, buf, 4 - extra);
3043 }
3044 else if (len32 > 0) {
3045 u8 buf[4];
3046
3047 /* Read the first word. */
3048 if (cmd_flags)
3049 cmd_flags = 0;
3050 else
3051 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3052
3053 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3054
3055 /* Advance to the next dword. */
3056 offset32 += 4;
3057 ret_buf += 4;
3058 len32 -= 4;
3059
3060 while (len32 > 4 && rc == 0) {
3061 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3062
3063 /* Advance to the next dword. */
3064 offset32 += 4;
3065 ret_buf += 4;
3066 len32 -= 4;
3067 }
3068
3069 if (rc)
3070 return rc;
3071
3072 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3074
3075 memcpy(ret_buf, buf, 4 - extra);
3076 }
3077
3078 /* Disable access to flash interface */
3079 bnx2_disable_nvram_access(bp);
3080
3081 bnx2_release_nvram_lock(bp);
3082
3083 return rc;
3084}
3085
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the dwords
 * surrounding the request are read first and merged with the caller's
 * data in a temporary buffer ('align_buf').  The write then proceeds
 * one flash page at a time; for non-buffered parts each page must be
 * read into a scratch buffer, erased, and rewritten in full.  The
 * NVRAM lock is taken and dropped around each page so other firmware
 * agents can interleave.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths that 'goto nvram_write_end' from inside
 * the per-page loop (after bnx2_acquire_nvram_lock succeeds) do not
 * visibly release the NVRAM lock or disable flash access -- confirm
 * whether this is handled elsewhere or is a latent leak.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range to the containing
		 * dword and fetch its current contents. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: widen to a whole dword and fetch the
		 * trailing dword's current contents. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge preserved head/tail bytes with the new data. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		/* Scratch buffer for one full page (264 bytes --
		 * presumably >= the largest page_size among non-buffered
		 * flash_table entries; confirm against the table). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of
			 * the data range for buffered parts. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3265
/* Reset the chip core.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the bootcode via
 *              bnx2_fw_sync() so the firmware knows why the driver is
 *              resetting.
 *
 * Quiesces DMA and host coalescing, handshakes with the firmware
 * before (WAIT0) and after (WAIT1) the reset, issues the chip-specific
 * reset (5709 via MISC_COMMAND, earlier chips via the PCICFG register),
 * verifies byte-swap configuration, and applies the 5706 A0
 * voltage/rbuf workarounds.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: dedicated software-reset command; the read-back
		 * flushes the write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* ~20ms uninterruptible sleep on 5706 A0/A1 --
		 * presumably a reset erratum workaround; confirm. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3357
/* Bring the chip from post-reset state to operational: program DMA
 * byte/word swapping and channel counts, context memory, on-chip CPU
 * firmware, MAC address, MQ/RV2P/TBDR page sizes, MTU, host-coalescing
 * parameters, and the RX filter; then complete the WAIT2 firmware
 * handshake and enable the remaining blocks.
 *
 * Returns 0 on success or a negative errno from firmware/CPU init.
 *
 * NOTE(review): bnx2_init_nvram()'s return value is ignored here --
 * verify that a probe-time NVRAM failure is acceptable at this point.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA swap modes plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra bit for 133MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0: restrict TDMA to a single DMA engine. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering enable bit in PCI-X command. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 need the MQ halt-disable workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU. Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value
	 * in the high half and the normal value in the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final firmware handshake for this init stage. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3525
/* Program the TX L2 context for connection id 'cid': the context type,
 * the command type, and the high/low halves of the TX BD chain's DMA
 * address.  The 5709 uses a different in-context layout, selected via
 * the *_XI register offsets. */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	/* Pick the context field offsets for this chip family. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* (8 << 16): NOTE(review) -- presumably a BD fetch/batch count;
	 * confirm against the 5706/5709 programming documentation. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
Michael Chanb6016b72005-05-26 13:03:09 -07003554
/* Initialize TX ring state: point the last BD back at the ring base to
 * form a circular chain, clear the producer/consumer indices, cache the
 * doorbell mailbox addresses for the fast path, and program the TX
 * context. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Half the ring -- presumably the occupancy threshold at which
	 * the TX completion path re-wakes the stack queue; confirm in
	 * the xmit/completion code. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The final BD is a chain pointer back to the ring's own base. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the mailbox addresses used to ring the TX doorbell. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3579
/* Initialize the RX rings: size each receive buffer for the current
 * MTU, initialize every BD on every ring page and chain the pages into
 * a circle, program the RX context with the chain's base DMA address,
 * pre-fill the ring with allocated skbs, and publish the producer
 * index/byte sequence to the chip mailboxes. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page chains to the next page, wrapping
		 * back to page 0 at the end. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Base DMA address of the first ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the ring with receive buffers; stop early (with a
	 * partially filled ring) if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3639
3640static void
Michael Chan13daffa2006-03-20 17:49:20 -08003641bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3642{
3643 u32 num_rings, max;
3644
3645 bp->rx_ring_size = size;
3646 num_rings = 1;
3647 while (size > MAX_RX_DESC_CNT) {
3648 size -= MAX_RX_DESC_CNT;
3649 num_rings++;
3650 }
3651 /* round to next power of 2 */
3652 max = MAX_RX_RINGS;
3653 while ((max & num_rings) == 0)
3654 max >>= 1;
3655
3656 if (num_rings != max)
3657 max <<= 1;
3658
3659 bp->rx_max_ring = max;
3660 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3661}
3662
3663static void
Michael Chanb6016b72005-05-26 13:03:09 -07003664bnx2_free_tx_skbs(struct bnx2 *bp)
3665{
3666 int i;
3667
3668 if (bp->tx_buf_ring == NULL)
3669 return;
3670
3671 for (i = 0; i < TX_DESC_CNT; ) {
3672 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3673 struct sk_buff *skb = tx_buf->skb;
3674 int j, last;
3675
3676 if (skb == NULL) {
3677 i++;
3678 continue;
3679 }
3680
3681 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3682 skb_headlen(skb), PCI_DMA_TODEVICE);
3683
3684 tx_buf->skb = NULL;
3685
3686 last = skb_shinfo(skb)->nr_frags;
3687 for (j = 0; j < last; j++) {
3688 tx_buf = &bp->tx_buf_ring[i + j + 1];
3689 pci_unmap_page(bp->pdev,
3690 pci_unmap_addr(tx_buf, mapping),
3691 skb_shinfo(skb)->frags[j].size,
3692 PCI_DMA_TODEVICE);
3693 }
Michael Chan745720e2006-06-29 12:37:41 -07003694 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003695 i += j + 1;
3696 }
3697
3698}
3699
3700static void
3701bnx2_free_rx_skbs(struct bnx2 *bp)
3702{
3703 int i;
3704
3705 if (bp->rx_buf_ring == NULL)
3706 return;
3707
Michael Chan13daffa2006-03-20 17:49:20 -08003708 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07003709 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3710 struct sk_buff *skb = rx_buf->skb;
3711
Michael Chan05d0f1c2005-11-04 08:53:48 -08003712 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003713 continue;
3714
3715 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3716 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3717
3718 rx_buf->skb = NULL;
3719
Michael Chan745720e2006-06-29 12:37:41 -07003720 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07003721 }
3722}
3723
/* Release all driver-owned socket buffers on both the TX and RX rings
 * (unmapping their DMA addresses); the ring memory itself remains
 * allocated.  Called after a chip reset in bnx2_reset_nic(). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3730
3731static int
3732bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3733{
3734 int rc;
3735
3736 rc = bnx2_reset_chip(bp, reset_code);
3737 bnx2_free_skbs(bp);
3738 if (rc)
3739 return rc;
3740
Michael Chanfba9fe92006-06-12 22:21:25 -07003741 if ((rc = bnx2_init_chip(bp)) != 0)
3742 return rc;
3743
Michael Chanb6016b72005-05-26 13:03:09 -07003744 bnx2_init_tx_ring(bp);
3745 bnx2_init_rx_ring(bp);
3746 return 0;
3747}
3748
3749static int
3750bnx2_init_nic(struct bnx2 *bp)
3751{
3752 int rc;
3753
3754 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3755 return rc;
3756
Michael Chan80be4432006-11-19 14:07:28 -08003757 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003758 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08003759 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003760 bnx2_set_link(bp);
3761 return 0;
3762}
3763
3764static int
3765bnx2_test_registers(struct bnx2 *bp)
3766{
3767 int ret;
3768 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003769 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003770 u16 offset;
3771 u16 flags;
3772 u32 rw_mask;
3773 u32 ro_mask;
3774 } reg_tbl[] = {
3775 { 0x006c, 0, 0x00000000, 0x0000003f },
3776 { 0x0090, 0, 0xffffffff, 0x00000000 },
3777 { 0x0094, 0, 0x00000000, 0x00000000 },
3778
3779 { 0x0404, 0, 0x00003f00, 0x00000000 },
3780 { 0x0418, 0, 0x00000000, 0xffffffff },
3781 { 0x041c, 0, 0x00000000, 0xffffffff },
3782 { 0x0420, 0, 0x00000000, 0x80ffffff },
3783 { 0x0424, 0, 0x00000000, 0x00000000 },
3784 { 0x0428, 0, 0x00000000, 0x00000001 },
3785 { 0x0450, 0, 0x00000000, 0x0000ffff },
3786 { 0x0454, 0, 0x00000000, 0xffffffff },
3787 { 0x0458, 0, 0x00000000, 0xffffffff },
3788
3789 { 0x0808, 0, 0x00000000, 0xffffffff },
3790 { 0x0854, 0, 0x00000000, 0xffffffff },
3791 { 0x0868, 0, 0x00000000, 0x77777777 },
3792 { 0x086c, 0, 0x00000000, 0x77777777 },
3793 { 0x0870, 0, 0x00000000, 0x77777777 },
3794 { 0x0874, 0, 0x00000000, 0x77777777 },
3795
3796 { 0x0c00, 0, 0x00000000, 0x00000001 },
3797 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3798 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003799
3800 { 0x1000, 0, 0x00000000, 0x00000001 },
3801 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07003802
3803 { 0x1408, 0, 0x01c00800, 0x00000000 },
3804 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3805 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003806 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003807 { 0x14b0, 0, 0x00000002, 0x00000001 },
3808 { 0x14b8, 0, 0x00000000, 0x00000000 },
3809 { 0x14c0, 0, 0x00000000, 0x00000009 },
3810 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3811 { 0x14cc, 0, 0x00000000, 0x00000001 },
3812 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003813
3814 { 0x1800, 0, 0x00000000, 0x00000001 },
3815 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07003816
3817 { 0x2800, 0, 0x00000000, 0x00000001 },
3818 { 0x2804, 0, 0x00000000, 0x00003f01 },
3819 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3820 { 0x2810, 0, 0xffff0000, 0x00000000 },
3821 { 0x2814, 0, 0xffff0000, 0x00000000 },
3822 { 0x2818, 0, 0xffff0000, 0x00000000 },
3823 { 0x281c, 0, 0xffff0000, 0x00000000 },
3824 { 0x2834, 0, 0xffffffff, 0x00000000 },
3825 { 0x2840, 0, 0x00000000, 0xffffffff },
3826 { 0x2844, 0, 0x00000000, 0xffffffff },
3827 { 0x2848, 0, 0xffffffff, 0x00000000 },
3828 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3829
3830 { 0x2c00, 0, 0x00000000, 0x00000011 },
3831 { 0x2c04, 0, 0x00000000, 0x00030007 },
3832
Michael Chanb6016b72005-05-26 13:03:09 -07003833 { 0x3c00, 0, 0x00000000, 0x00000001 },
3834 { 0x3c04, 0, 0x00000000, 0x00070000 },
3835 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3836 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3837 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3838 { 0x3c14, 0, 0x00000000, 0xffffffff },
3839 { 0x3c18, 0, 0x00000000, 0xffffffff },
3840 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3841 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003842
3843 { 0x5004, 0, 0x00000000, 0x0000007f },
3844 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3845 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3846
Michael Chanb6016b72005-05-26 13:03:09 -07003847 { 0x5c00, 0, 0x00000000, 0x00000001 },
3848 { 0x5c04, 0, 0x00000000, 0x0003000f },
3849 { 0x5c08, 0, 0x00000003, 0x00000000 },
3850 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3851 { 0x5c10, 0, 0x00000000, 0xffffffff },
3852 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3853 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3854 { 0x5c88, 0, 0x00000000, 0x00077373 },
3855 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3856
3857 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3858 { 0x680c, 0, 0xffffffff, 0x00000000 },
3859 { 0x6810, 0, 0xffffffff, 0x00000000 },
3860 { 0x6814, 0, 0xffffffff, 0x00000000 },
3861 { 0x6818, 0, 0xffffffff, 0x00000000 },
3862 { 0x681c, 0, 0xffffffff, 0x00000000 },
3863 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3865 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3866 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3869 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3870 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3871 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3872 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3873 { 0x684c, 0, 0xffffffff, 0x00000000 },
3874 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3877 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3878 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3879 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3880
3881 { 0xffff, 0, 0x00000000, 0x00000000 },
3882 };
3883
3884 ret = 0;
3885 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3886 u32 offset, rw_mask, ro_mask, save_val, val;
3887
3888 offset = (u32) reg_tbl[i].offset;
3889 rw_mask = reg_tbl[i].rw_mask;
3890 ro_mask = reg_tbl[i].ro_mask;
3891
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003892 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003893
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003894 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003895
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003896 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003897 if ((val & rw_mask) != 0) {
3898 goto reg_test_err;
3899 }
3900
3901 if ((val & ro_mask) != (save_val & ro_mask)) {
3902 goto reg_test_err;
3903 }
3904
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003905 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003906
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003907 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003908 if ((val & rw_mask) != rw_mask) {
3909 goto reg_test_err;
3910 }
3911
3912 if ((val & ro_mask) != (save_val & ro_mask)) {
3913 goto reg_test_err;
3914 }
3915
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003916 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003917 continue;
3918
3919reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07003920 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07003921 ret = -ENODEV;
3922 break;
3923 }
3924 return ret;
3925}
3926
3927static int
3928bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3929{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003930 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003931 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3932 int i;
3933
3934 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3935 u32 offset;
3936
3937 for (offset = 0; offset < size; offset += 4) {
3938
3939 REG_WR_IND(bp, start + offset, test_pattern[i]);
3940
3941 if (REG_RD_IND(bp, start + offset) !=
3942 test_pattern[i]) {
3943 return -ENODEV;
3944 }
3945 }
3946 }
3947 return 0;
3948}
3949
3950static int
3951bnx2_test_memory(struct bnx2 *bp)
3952{
3953 int ret = 0;
3954 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003955 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003956 u32 offset;
3957 u32 len;
3958 } mem_tbl[] = {
3959 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003960 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003961 { 0xe0000, 0x4000 },
3962 { 0x120000, 0x4000 },
3963 { 0x1a0000, 0x4000 },
3964 { 0x160000, 0x4000 },
3965 { 0xffffffff, 0 },
3966 };
3967
3968 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3969 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3970 mem_tbl[i].len)) != 0) {
3971 return ret;
3972 }
3973 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003974
Michael Chanb6016b72005-05-26 13:03:09 -07003975 return ret;
3976}
3977
Michael Chanbc5a0692006-01-23 16:13:22 -08003978#define BNX2_MAC_LOOPBACK 0
3979#define BNX2_PHY_LOOPBACK 1
3980
/* Send one self-addressed frame through the MAC or PHY loopback path
 * and verify it returns intact on the rx ring.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM if no skb is available, or
 * -ENODEV if the frame is lost or corrupted.  Assumes no other traffic
 * is running -- NOTE(review): callers appear to quiesce the NIC first
 * (see bnx2_test_loopback); confirm for any new caller.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-sized test frame: our own MAC address as the
	 * destination, zeroed src/type bytes, then an incrementing
	 * byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the single frame on the tx ring. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index + byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the tx
	 * completion and the looped-back rx frame.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Tx must have consumed exactly the one frame we queued. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts (one) frame must have arrived on rx. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr sits in front of the frame data in the rx buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any rx error flag means the frame was mangled in loopback. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Hardware-reported length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4099
Michael Chanbc5a0692006-01-23 16:13:22 -08004100#define BNX2_MAC_LOOPBACK_FAILED 1
4101#define BNX2_PHY_LOOPBACK_FAILED 2
4102#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4103 BNX2_PHY_LOOPBACK_FAILED)
4104
4105static int
4106bnx2_test_loopback(struct bnx2 *bp)
4107{
4108 int rc = 0;
4109
4110 if (!netif_running(bp->dev))
4111 return BNX2_LOOPBACK_FAILED;
4112
4113 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4114 spin_lock_bh(&bp->phy_lock);
4115 bnx2_init_phy(bp);
4116 spin_unlock_bh(&bp->phy_lock);
4117 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4118 rc |= BNX2_MAC_LOOPBACK_FAILED;
4119 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4120 rc |= BNX2_PHY_LOOPBACK_FAILED;
4121 return rc;
4122}
4123
Michael Chanb6016b72005-05-26 13:03:09 -07004124#define NVRAM_SIZE 0x200
4125#define CRC32_RESIDUAL 0xdebb20e3
4126
4127static int
4128bnx2_test_nvram(struct bnx2 *bp)
4129{
4130 u32 buf[NVRAM_SIZE / 4];
4131 u8 *data = (u8 *) buf;
4132 int rc = 0;
4133 u32 magic, csum;
4134
4135 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4136 goto test_nvram_done;
4137
4138 magic = be32_to_cpu(buf[0]);
4139 if (magic != 0x669955aa) {
4140 rc = -ENODEV;
4141 goto test_nvram_done;
4142 }
4143
4144 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4145 goto test_nvram_done;
4146
4147 csum = ether_crc_le(0x100, data);
4148 if (csum != CRC32_RESIDUAL) {
4149 rc = -ENODEV;
4150 goto test_nvram_done;
4151 }
4152
4153 csum = ether_crc_le(0x100, data + 0x100);
4154 if (csum != CRC32_RESIDUAL) {
4155 rc = -ENODEV;
4156 }
4157
4158test_nvram_done:
4159 return rc;
4160}
4161
4162static int
4163bnx2_test_link(struct bnx2 *bp)
4164{
4165 u32 bmsr;
4166
Michael Chanc770a652005-08-25 15:38:39 -07004167 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004168 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4169 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07004170 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004171
Michael Chanb6016b72005-05-26 13:03:09 -07004172 if (bmsr & BMSR_LSTATUS) {
4173 return 0;
4174 }
4175 return -ENODEV;
4176}
4177
4178static int
4179bnx2_test_intr(struct bnx2 *bp)
4180{
4181 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004182 u16 status_idx;
4183
4184 if (!netif_running(bp->dev))
4185 return -ENODEV;
4186
4187 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4188
4189 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004190 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004191 REG_RD(bp, BNX2_HC_COMMAND);
4192
4193 for (i = 0; i < 10; i++) {
4194 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4195 status_idx) {
4196
4197 break;
4198 }
4199
4200 msleep_interruptible(10);
4201 }
4202 if (i < 10)
4203 return 0;
4204
4205 return -ENODEV;
4206}
4207
/* Per-timer-tick SerDes link maintenance for the 5706: parallel
 * detection.  If autoneg gets no response but a signal is present,
 * force 1Gb full duplex; if the partner later starts sending config
 * codes, fall back to autonegotiation.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific shadow/expansion registers:
			 * phy1 carries signal detect, phy2 the partner
			 * CONFIG indication.  phy2 is read twice --
			 * NOTE(review): presumably a latched register;
			 * confirm with the PHY documentation.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating:
				 * force 1000 Mb full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends config codes, re-enable autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4262
/* Per-timer-tick SerDes link maintenance for the 5708: while link is
 * down with autoneg requested, alternate between forcing 2.5Gb full
 * duplex and re-enabling autonegotiation.  Only applies to 2.5G
 * capable PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not linked yet: try forced 2.5Gb
			 * for SERDES_FORCED_TIMEOUT ticks.
			 */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either: return to
			 * autoneg and hold off for two timer ticks.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4297
4298static void
Michael Chanb6016b72005-05-26 13:03:09 -07004299bnx2_timer(unsigned long data)
4300{
4301 struct bnx2 *bp = (struct bnx2 *) data;
4302 u32 msg;
4303
Michael Chancd339a02005-08-25 15:35:24 -07004304 if (!netif_running(bp->dev))
4305 return;
4306
Michael Chanb6016b72005-05-26 13:03:09 -07004307 if (atomic_read(&bp->intr_sem) != 0)
4308 goto bnx2_restart_timer;
4309
4310 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004311 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004312
Michael Chancea94db2006-06-12 22:16:13 -07004313 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4314
Michael Chanf8dd0642006-11-19 14:08:29 -08004315 if (bp->phy_flags & PHY_SERDES_FLAG) {
4316 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4317 bnx2_5706_serdes_timer(bp);
4318 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4319 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004320 }
4321
4322bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004323 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004324}
4325
4326/* Called with rtnl_lock */
/* ndo_open: power the chip up, allocate rings, hook the IRQ (MSI when
 * supported, INTx otherwise), initialize the NIC and start the tx
 * queue and timer.  If the post-enable MSI test fails, falls back to
 * INTx and re-initializes.  Called with rtnl_lock held.  Returns 0 or
 * a negative errno with everything unwound.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is skipped on 5706 A0/A1 and when disabled by module
	 * parameter -- NOTE(review): presumably due to MSI errata on
	 * those steppings; only the chip-ID check is visible here.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind: IRQ, MSI, rx skbs and ring memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Chip must be re-initialized after the IRQ
			 * mode change.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4421
4422static void
David Howellsc4028952006-11-22 14:57:56 +00004423bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07004424{
David Howellsc4028952006-11-22 14:57:56 +00004425 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07004426
Michael Chanafdc08b2005-08-25 15:34:29 -07004427 if (!netif_running(bp->dev))
4428 return;
4429
4430 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07004431 bnx2_netif_stop(bp);
4432
4433 bnx2_init_nic(bp);
4434
4435 atomic_set(&bp->intr_sem, 1);
4436 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07004437 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004438}
4439
4440static void
4441bnx2_tx_timeout(struct net_device *dev)
4442{
Michael Chan972ec0d2006-01-23 16:12:43 -08004443 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004444
4445 /* This allows the netif to be shutdown gracefully before resetting */
4446 schedule_work(&bp->reset_task);
4447}
4448
4449#ifdef BCM_VLAN
4450/* Called with rtnl_lock */
4451static void
4452bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4453{
Michael Chan972ec0d2006-01-23 16:12:43 -08004454 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004455
4456 bnx2_netif_stop(bp);
4457
4458 bp->vlgrp = vlgrp;
4459 bnx2_set_rx_mode(dev);
4460
4461 bnx2_netif_start(bp);
4462}
4463
4464/* Called with rtnl_lock */
4465static void
4466bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4467{
Michael Chan972ec0d2006-01-23 16:12:43 -08004468 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004469
4470 bnx2_netif_stop(bp);
Dan Aloni5c15bde2007-03-02 20:44:51 -08004471 vlan_group_set_device(bp->vlgrp, vid, NULL);
Michael Chanb6016b72005-05-26 13:03:09 -07004472 bnx2_set_rx_mode(dev);
4473
4474 bnx2_netif_start(bp);
4475}
4476#endif
4477
Herbert Xu932ff272006-06-09 12:20:56 -07004478/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07004479 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4480 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07004481 */
4482static int
4483bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4484{
Michael Chan972ec0d2006-01-23 16:12:43 -08004485 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004486 dma_addr_t mapping;
4487 struct tx_bd *txbd;
4488 struct sw_bd *tx_buf;
4489 u32 len, vlan_tag_flags, last_frag, mss;
4490 u16 prod, ring_prod;
4491 int i;
4492
Michael Chane89bbf12005-08-25 15:36:58 -07004493 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004494 netif_stop_queue(dev);
4495 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4496 dev->name);
4497
4498 return NETDEV_TX_BUSY;
4499 }
4500 len = skb_headlen(skb);
4501 prod = bp->tx_prod;
4502 ring_prod = TX_RING_IDX(prod);
4503
4504 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004505 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07004506 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4507 }
4508
4509 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4510 vlan_tag_flags |=
4511 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4512 }
Herbert Xu79671682006-06-22 02:40:14 -07004513 if ((mss = skb_shinfo(skb)->gso_size) &&
Michael Chanb6016b72005-05-26 13:03:09 -07004514 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4515 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004516 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07004517
4518 if (skb_header_cloned(skb) &&
4519 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4520 dev_kfree_skb(skb);
4521 return NETDEV_TX_OK;
4522 }
4523
4524 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4525 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4526
4527 tcp_opt_len = 0;
4528 if (skb->h.th->doff > 5) {
4529 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4530 }
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004531 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb6016b72005-05-26 13:03:09 -07004532
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004533 iph = ip_hdr(skb);
4534 iph->check = 0;
4535 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4536 skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4537 0, IPPROTO_TCP, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004538
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004539 if (tcp_opt_len || (iph->ihl > 5)) {
4540 vlan_tag_flags |= ((iph->ihl - 5) +
4541 (tcp_opt_len >> 2)) << 8;
Michael Chanb6016b72005-05-26 13:03:09 -07004542 }
4543 }
4544 else
Michael Chanb6016b72005-05-26 13:03:09 -07004545 {
4546 mss = 0;
4547 }
4548
4549 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004550
Michael Chanb6016b72005-05-26 13:03:09 -07004551 tx_buf = &bp->tx_buf_ring[ring_prod];
4552 tx_buf->skb = skb;
4553 pci_unmap_addr_set(tx_buf, mapping, mapping);
4554
4555 txbd = &bp->tx_desc_ring[ring_prod];
4556
4557 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4558 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4559 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4560 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4561
4562 last_frag = skb_shinfo(skb)->nr_frags;
4563
4564 for (i = 0; i < last_frag; i++) {
4565 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4566
4567 prod = NEXT_TX_BD(prod);
4568 ring_prod = TX_RING_IDX(prod);
4569 txbd = &bp->tx_desc_ring[ring_prod];
4570
4571 len = frag->size;
4572 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4573 len, PCI_DMA_TODEVICE);
4574 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4575 mapping, mapping);
4576
4577 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4578 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4579 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4580 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4581
4582 }
4583 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4584
4585 prod = NEXT_TX_BD(prod);
4586 bp->tx_prod_bseq += skb->len;
4587
Michael Chan234754d2006-11-19 14:11:41 -08004588 REG_WR16(bp, bp->tx_bidx_addr, prod);
4589 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004590
4591 mmiowb();
4592
4593 bp->tx_prod = prod;
4594 dev->trans_start = jiffies;
4595
Michael Chane89bbf12005-08-25 15:36:58 -07004596 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07004597 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07004598 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07004599 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004600 }
4601
4602 return NETDEV_TX_OK;
4603}
4604
4605/* Called with rtnl_lock */
/* ndo_stop: tear the device down roughly in the reverse order of
 * bnx2_open().  Called with rtnl_lock held.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the unload reason for the firmware handshake: it
	 * selects between wake-on-LAN and plain shutdown behavior.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4640
4641#define GET_NET_STATS64(ctr) \
4642 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4643 (unsigned long) (ctr##_lo)
4644
4645#define GET_NET_STATS32(ctr) \
4646 (ctr##_lo)
4647
4648#if (BITS_PER_LONG == 64)
4649#define GET_NET_STATS GET_NET_STATS64
4650#else
4651#define GET_NET_STATS GET_NET_STATS32
4652#endif
4653
/* ndo_get_stats: translate the chip/firmware statistics block into
 * struct net_device_stats.  64-bit hardware counters are folded via
 * GET_NET_STATS (truncated to 32 bits on 32-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block may not be allocated yet (device never opened);
	 * return the last cached values.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier-sense errors are forced to 0 on 5706
	 * and 5708 A0 -- presumably the counter is unreliable on those
	 * steppings; confirm against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Packets dropped by the firmware count as missed. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4729
4730/* All ethtool functions called with rtnl_lock */
4731
4732static int
4733bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4734{
Michael Chan972ec0d2006-01-23 16:12:43 -08004735 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004736
4737 cmd->supported = SUPPORTED_Autoneg;
4738 if (bp->phy_flags & PHY_SERDES_FLAG) {
4739 cmd->supported |= SUPPORTED_1000baseT_Full |
4740 SUPPORTED_FIBRE;
4741
4742 cmd->port = PORT_FIBRE;
4743 }
4744 else {
4745 cmd->supported |= SUPPORTED_10baseT_Half |
4746 SUPPORTED_10baseT_Full |
4747 SUPPORTED_100baseT_Half |
4748 SUPPORTED_100baseT_Full |
4749 SUPPORTED_1000baseT_Full |
4750 SUPPORTED_TP;
4751
4752 cmd->port = PORT_TP;
4753 }
4754
4755 cmd->advertising = bp->advertising;
4756
4757 if (bp->autoneg & AUTONEG_SPEED) {
4758 cmd->autoneg = AUTONEG_ENABLE;
4759 }
4760 else {
4761 cmd->autoneg = AUTONEG_DISABLE;
4762 }
4763
4764 if (netif_carrier_ok(dev)) {
4765 cmd->speed = bp->line_speed;
4766 cmd->duplex = bp->duplex;
4767 }
4768 else {
4769 cmd->speed = -1;
4770 cmd->duplex = -1;
4771 }
4772
4773 cmd->transceiver = XCVR_INTERNAL;
4774 cmd->phy_address = bp->phy_addr;
4775
4776 return 0;
4777}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004778
/* ethtool set_settings handler (called with rtnl_lock held).
 * Validates the requested autoneg/speed/duplex combination against the
 * media type (serdes vs. copper) before committing anything to bp and
 * reprogramming the PHY.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed on an -EINVAL path. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* No single speed selected: advertise everything
			 * the media supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* Forced serdes speed must be 1000 or 2500 full
			 * duplex; 2500 only on 2.5G-capable PHYs.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forcing 1000 on copper is not supported. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4854
4855static void
4856bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4857{
Michael Chan972ec0d2006-01-23 16:12:43 -08004858 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004859
4860 strcpy(info->driver, DRV_MODULE_NAME);
4861 strcpy(info->version, DRV_MODULE_VERSION);
4862 strcpy(info->bus_info, pci_name(bp->pdev));
4863 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4864 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4865 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004866 info->fw_version[1] = info->fw_version[3] = '.';
4867 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004868}
4869
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4877
4878static void
4879bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4880{
4881 u32 *p = _p, i, offset;
4882 u8 *orig_p = _p;
4883 struct bnx2 *bp = netdev_priv(dev);
4884 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4885 0x0800, 0x0880, 0x0c00, 0x0c10,
4886 0x0c30, 0x0d08, 0x1000, 0x101c,
4887 0x1040, 0x1048, 0x1080, 0x10a4,
4888 0x1400, 0x1490, 0x1498, 0x14f0,
4889 0x1500, 0x155c, 0x1580, 0x15dc,
4890 0x1600, 0x1658, 0x1680, 0x16d8,
4891 0x1800, 0x1820, 0x1840, 0x1854,
4892 0x1880, 0x1894, 0x1900, 0x1984,
4893 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4894 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4895 0x2000, 0x2030, 0x23c0, 0x2400,
4896 0x2800, 0x2820, 0x2830, 0x2850,
4897 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4898 0x3c00, 0x3c94, 0x4000, 0x4010,
4899 0x4080, 0x4090, 0x43c0, 0x4458,
4900 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4901 0x4fc0, 0x5010, 0x53c0, 0x5444,
4902 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4903 0x5fc0, 0x6000, 0x6400, 0x6428,
4904 0x6800, 0x6848, 0x684c, 0x6860,
4905 0x6888, 0x6910, 0x8000 };
4906
4907 regs->version = 0;
4908
4909 memset(p, 0, BNX2_REGDUMP_LEN);
4910
4911 if (!netif_running(bp->dev))
4912 return;
4913
4914 i = 0;
4915 offset = reg_boundaries[0];
4916 p += offset;
4917 while (offset < BNX2_REGDUMP_LEN) {
4918 *p++ = REG_RD(bp, offset);
4919 offset += 4;
4920 if (offset == reg_boundaries[i + 1]) {
4921 offset = reg_boundaries[i + 2];
4922 p = (u32 *) (orig_p + offset);
4923 i += 2;
4924 }
4925 }
4926}
4927
Michael Chanb6016b72005-05-26 13:03:09 -07004928static void
4929bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4930{
Michael Chan972ec0d2006-01-23 16:12:43 -08004931 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004932
4933 if (bp->flags & NO_WOL_FLAG) {
4934 wol->supported = 0;
4935 wol->wolopts = 0;
4936 }
4937 else {
4938 wol->supported = WAKE_MAGIC;
4939 if (bp->wol)
4940 wol->wolopts = WAKE_MAGIC;
4941 else
4942 wol->wolopts = 0;
4943 }
4944 memset(&wol->sopass, 0, sizeof(wol->sopass));
4945}
4946
4947static int
4948bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4949{
Michael Chan972ec0d2006-01-23 16:12:43 -08004950 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004951
4952 if (wol->wolopts & ~WAKE_MAGIC)
4953 return -EINVAL;
4954
4955 if (wol->wolopts & WAKE_MAGIC) {
4956 if (bp->flags & NO_WOL_FLAG)
4957 return -EINVAL;
4958
4959 bp->wol = 1;
4960 }
4961 else {
4962 bp->wol = 0;
4963 }
4964 return 0;
4965}
4966
/* ethtool nway_reset handler: restart autonegotiation.  Fails with
 * -EINVAL if autoneg is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the BH lock around the sleep. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the
		 * driver timer.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autoneg. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5001
5002static int
5003bnx2_get_eeprom_len(struct net_device *dev)
5004{
Michael Chan972ec0d2006-01-23 16:12:43 -08005005 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005006
Michael Chan1122db72006-01-23 16:11:42 -08005007 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005008 return 0;
5009
Michael Chan1122db72006-01-23 16:11:42 -08005010 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005011}
5012
5013static int
5014bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5015 u8 *eebuf)
5016{
Michael Chan972ec0d2006-01-23 16:12:43 -08005017 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005018 int rc;
5019
John W. Linville1064e942005-11-10 12:58:24 -08005020 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005021
5022 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5023
5024 return rc;
5025}
5026
5027static int
5028bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5029 u8 *eebuf)
5030{
Michael Chan972ec0d2006-01-23 16:12:43 -08005031 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005032 int rc;
5033
John W. Linville1064e942005-11-10 12:58:24 -08005034 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005035
5036 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5037
5038 return rc;
5039}
5040
5041static int
5042bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5043{
Michael Chan972ec0d2006-01-23 16:12:43 -08005044 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005045
5046 memset(coal, 0, sizeof(struct ethtool_coalesce));
5047
5048 coal->rx_coalesce_usecs = bp->rx_ticks;
5049 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5050 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5051 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5052
5053 coal->tx_coalesce_usecs = bp->tx_ticks;
5054 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5055 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5056 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5057
5058 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5059
5060 return 0;
5061}
5062
5063static int
5064bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5065{
Michael Chan972ec0d2006-01-23 16:12:43 -08005066 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005067
5068 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5069 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5070
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005071 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005072 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5073
5074 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5075 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5076
5077 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5078 if (bp->rx_quick_cons_trip_int > 0xff)
5079 bp->rx_quick_cons_trip_int = 0xff;
5080
5081 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5082 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5083
5084 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5085 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5086
5087 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5088 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5089
5090 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5091 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5092 0xff;
5093
5094 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5095 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5096 bp->stats_ticks &= 0xffff00;
5097
5098 if (netif_running(bp->dev)) {
5099 bnx2_netif_stop(bp);
5100 bnx2_init_nic(bp);
5101 bnx2_netif_start(bp);
5102 }
5103
5104 return 0;
5105}
5106
5107static void
5108bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5109{
Michael Chan972ec0d2006-01-23 16:12:43 -08005110 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005111
Michael Chan13daffa2006-03-20 17:49:20 -08005112 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005113 ering->rx_mini_max_pending = 0;
5114 ering->rx_jumbo_max_pending = 0;
5115
5116 ering->rx_pending = bp->rx_ring_size;
5117 ering->rx_mini_pending = 0;
5118 ering->rx_jumbo_pending = 0;
5119
5120 ering->tx_max_pending = MAX_TX_DESC_CNT;
5121 ering->tx_pending = bp->tx_ring_size;
5122}
5123
/* ethtool set_ringparam handler (called with rtnl_lock held): resize
 * the rx/tx rings.  If the device is running it is fully torn down
 * and re-initialized with the new ring sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The tx ring must hold more descriptors than one maximally
	 * fragmented skb can consume.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device is
			 * left stopped with no rings allocated; the caller
			 * gets the error but the interface stays down.
			 * Verify this matches the expected recovery model.
			 */
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5157
5158static void
5159bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5160{
Michael Chan972ec0d2006-01-23 16:12:43 -08005161 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005162
5163 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5164 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5165 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5166}
5167
5168static int
5169bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5170{
Michael Chan972ec0d2006-01-23 16:12:43 -08005171 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005172
5173 bp->req_flow_ctrl = 0;
5174 if (epause->rx_pause)
5175 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5176 if (epause->tx_pause)
5177 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5178
5179 if (epause->autoneg) {
5180 bp->autoneg |= AUTONEG_FLOW_CTRL;
5181 }
5182 else {
5183 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5184 }
5185
Michael Chanc770a652005-08-25 15:38:39 -07005186 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005187
5188 bnx2_setup_phy(bp);
5189
Michael Chanc770a652005-08-25 15:38:39 -07005190 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005191
5192 return 0;
5193}
5194
/* ethtool get_rx_csum handler: report the RX checksum offload flag. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5202
/* ethtool set_rx_csum handler: store the RX checksum offload flag. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5211
Michael Chanb11d6212006-06-29 12:31:21 -07005212static int
5213bnx2_set_tso(struct net_device *dev, u32 data)
5214{
5215 if (data)
5216 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5217 else
5218 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5219 return 0;
5220}
5221
#define BNX2_NUM_STATS 46

/* ethtool statistics names, returned for ETH_SS_STATS by
 * bnx2_get_strings().  Order must match bnx2_stats_offset_arr[] and
 * the per-chip stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5274
/* 32-bit word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware stats block offsets, parallel to bnx2_stats_str_arr[].
 * The _hi entries point at the upper word of 64-bit counters; the
 * width of each counter comes from the per-chip length tables (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5325
/* Per-counter byte widths (0 = counter not reported) for 5706 and
 * early 5708 steppings.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5336
/* Per-counter byte widths for later chips; only stat_IfHCInBadOctets
 * remains skipped (width 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5344
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST; order matches the buf[] slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5357
/* ethtool self_test_count handler: number of ETH_SS_TEST entries. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5363
/* ethtool self_test handler.  With ETH_TEST_FL_OFFLINE set, the
 * destructive register, memory, and loopback tests run first (these
 * reset the chip); the nvram, interrupt, and link tests always run.
 * buf[i] is nonzero when test i failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback result value directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or leave reset if down). */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5419
5420static void
5421bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5422{
5423 switch (stringset) {
5424 case ETH_SS_STATS:
5425 memcpy(buf, bnx2_stats_str_arr,
5426 sizeof(bnx2_stats_str_arr));
5427 break;
5428 case ETH_SS_TEST:
5429 memcpy(buf, bnx2_tests_str_arr,
5430 sizeof(bnx2_tests_str_arr));
5431 break;
5432 }
5433}
5434
/* ethtool get_stats_count handler: number of ETH_SS_STATS entries. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5440
/* ethtool get_ethtool_stats handler: copy BNX2_NUM_STATS counters out
 * of the hardware statistics block.  Each counter is 4 or 8 bytes wide
 * according to the per-chip length tables; a width of 0 marks counters
 * skipped because of chip errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block not allocated yet (device never opened). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early steppings use the table with errata counters zeroed. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: offset points at the high word. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5481
/* ethtool phys_id handler: blink the port LEDs for 'data' seconds
 * (default 2) by alternating all-off and all-on at 500 ms, then
 * restore LED control to the hardware.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take LED control away from the MAC for the duration. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* Half cycle: LEDs off (override, no bits set). */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* Half cycle: force all LEDs on. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupts. */
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation and the saved LED mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5515
/* ethtool operations table; all handlers are invoked by the ethtool
 * core with rtnl_lock held (see comment above bnx2_get_settings()).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
5551
5552/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register accesses are serialized against link handling via
 * phy_lock; register writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5593
5594/* Called with rtnl_lock */
5595static int
5596bnx2_change_mac_addr(struct net_device *dev, void *p)
5597{
5598 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005599 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005600
Michael Chan73eef4c2005-08-25 15:39:15 -07005601 if (!is_valid_ether_addr(addr->sa_data))
5602 return -EINVAL;
5603
Michael Chanb6016b72005-05-26 13:03:09 -07005604 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5605 if (netif_running(dev))
5606 bnx2_set_mac_addr(bp);
5607
5608 return 0;
5609}
5610
5611/* Called with rtnl_lock */
5612static int
5613bnx2_change_mtu(struct net_device *dev, int new_mtu)
5614{
Michael Chan972ec0d2006-01-23 16:12:43 -08005615 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005616
5617 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5618 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5619 return -EINVAL;
5620
5621 dev->mtu = new_mtu;
5622 if (netif_running(dev)) {
5623 bnx2_netif_stop(bp);
5624
5625 bnx2_init_nic(bp);
5626
5627 bnx2_netif_start(bp);
5628 }
5629 return 0;
5630}
5631
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler by hand with the device's
 * IRQ masked off (used by netconsole and similar).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5643
/* Probe-time helper: decide whether a 5709 port is copper or serdes
 * based on the bond id and media straps in the dual-media control
 * register, setting PHY_SERDES_FLAG accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id directly identifies single-media packages:
	 * _C is copper, _S is serdes.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media package: decode the strap field, preferring the
	 * software override when it is enabled.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-value-to-media mapping differs per PCI function.
	 * NOTE(review): magic strap values presumably come from the
	 * 5709 data sheet — verify against hardware documentation.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
5681
Michael Chanb6016b72005-05-26 13:03:09 -07005682static int __devinit
5683bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5684{
5685 struct bnx2 *bp;
5686 unsigned long mem_len;
5687 int rc;
5688 u32 reg;
5689
5690 SET_MODULE_OWNER(dev);
5691 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005692 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005693
5694 bp->flags = 0;
5695 bp->phy_flags = 0;
5696
5697 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5698 rc = pci_enable_device(pdev);
5699 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005700 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07005701 goto err_out;
5702 }
5703
5704 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005705 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005706 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005707 rc = -ENODEV;
5708 goto err_out_disable;
5709 }
5710
5711 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5712 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005713 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005714 goto err_out_disable;
5715 }
5716
5717 pci_set_master(pdev);
5718
5719 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5720 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005721 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005722 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005723 rc = -EIO;
5724 goto err_out_release;
5725 }
5726
Michael Chanb6016b72005-05-26 13:03:09 -07005727 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5728 bp->flags |= USING_DAC_FLAG;
5729 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005730 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005731 "pci_set_consistent_dma_mask failed, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005732 rc = -EIO;
5733 goto err_out_release;
5734 }
5735 }
5736 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005737 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005738 rc = -EIO;
5739 goto err_out_release;
5740 }
5741
5742 bp->dev = dev;
5743 bp->pdev = pdev;
5744
5745 spin_lock_init(&bp->phy_lock);
David Howellsc4028952006-11-22 14:57:56 +00005746 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005747
5748 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08005749 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07005750 dev->mem_end = dev->mem_start + mem_len;
5751 dev->irq = pdev->irq;
5752
5753 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5754
5755 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005756 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005757 rc = -ENOMEM;
5758 goto err_out_release;
5759 }
5760
5761 /* Configure byte swap and enable write to the reg_window registers.
5762 * Rely on CPU to do target byte swapping on big endian systems
5763 * The chip's target access swapping will not swap all accesses
5764 */
5765 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5766 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5767 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5768
Pavel Machek829ca9a2005-09-03 15:56:56 -07005769 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005770
5771 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5772
Michael Chan59b47d82006-11-19 14:10:45 -08005773 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5774 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5775 if (bp->pcix_cap == 0) {
5776 dev_err(&pdev->dev,
5777 "Cannot find PCIX capability, aborting.\n");
5778 rc = -EIO;
5779 goto err_out_unmap;
5780 }
5781 }
5782
Michael Chanb6016b72005-05-26 13:03:09 -07005783 /* Get bus information. */
5784 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5785 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5786 u32 clkreg;
5787
5788 bp->flags |= PCIX_FLAG;
5789
5790 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005791
Michael Chanb6016b72005-05-26 13:03:09 -07005792 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5793 switch (clkreg) {
5794 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5795 bp->bus_speed_mhz = 133;
5796 break;
5797
5798 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5799 bp->bus_speed_mhz = 100;
5800 break;
5801
5802 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5803 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5804 bp->bus_speed_mhz = 66;
5805 break;
5806
5807 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5808 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5809 bp->bus_speed_mhz = 50;
5810 break;
5811
5812 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5813 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5814 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5815 bp->bus_speed_mhz = 33;
5816 break;
5817 }
5818 }
5819 else {
5820 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5821 bp->bus_speed_mhz = 66;
5822 else
5823 bp->bus_speed_mhz = 33;
5824 }
5825
5826 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5827 bp->flags |= PCI_32BIT_FLAG;
5828
5829 /* 5706A0 may falsely detect SERR and PERR. */
5830 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5831 reg = REG_RD(bp, PCI_COMMAND);
5832 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5833 REG_WR(bp, PCI_COMMAND, reg);
5834 }
5835 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5836 !(bp->flags & PCIX_FLAG)) {
5837
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005838 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04005839 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005840 goto err_out_unmap;
5841 }
5842
5843 bnx2_init_nvram(bp);
5844
Michael Chane3648b32005-11-04 08:51:21 -08005845 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5846
5847 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08005848 BNX2_SHM_HDR_SIGNATURE_SIG) {
5849 u32 off = PCI_FUNC(pdev->devfn) << 2;
5850
5851 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5852 } else
Michael Chane3648b32005-11-04 08:51:21 -08005853 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5854
Michael Chanb6016b72005-05-26 13:03:09 -07005855 /* Get the permanent MAC address. First we need to make sure the
5856 * firmware is actually running.
5857 */
Michael Chane3648b32005-11-04 08:51:21 -08005858 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005859
5860 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5861 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04005862 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07005863 rc = -ENODEV;
5864 goto err_out_unmap;
5865 }
5866
Michael Chane3648b32005-11-04 08:51:21 -08005867 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005868
Michael Chane3648b32005-11-04 08:51:21 -08005869 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005870 bp->mac_addr[0] = (u8) (reg >> 8);
5871 bp->mac_addr[1] = (u8) reg;
5872
Michael Chane3648b32005-11-04 08:51:21 -08005873 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005874 bp->mac_addr[2] = (u8) (reg >> 24);
5875 bp->mac_addr[3] = (u8) (reg >> 16);
5876 bp->mac_addr[4] = (u8) (reg >> 8);
5877 bp->mac_addr[5] = (u8) reg;
5878
5879 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07005880 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07005881
5882 bp->rx_csum = 1;
5883
5884 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5885
5886 bp->tx_quick_cons_trip_int = 20;
5887 bp->tx_quick_cons_trip = 20;
5888 bp->tx_ticks_int = 80;
5889 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005890
Michael Chanb6016b72005-05-26 13:03:09 -07005891 bp->rx_quick_cons_trip_int = 6;
5892 bp->rx_quick_cons_trip = 6;
5893 bp->rx_ticks_int = 18;
5894 bp->rx_ticks = 18;
5895
5896 bp->stats_ticks = 1000000 & 0xffff00;
5897
5898 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005899 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005900
Michael Chan5b0c76a2005-11-04 08:45:49 -08005901 bp->phy_addr = 1;
5902
Michael Chanb6016b72005-05-26 13:03:09 -07005903 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08005904 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5905 bnx2_get_5709_media(bp);
5906 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07005907 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005908
5909 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07005910 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08005911 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08005912 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005913 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005914 BNX2_SHARED_HW_CFG_CONFIG);
5915 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5916 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5917 }
Michael Chan261dd5c2007-01-08 19:55:46 -08005918 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5919 CHIP_NUM(bp) == CHIP_NUM_5708)
5920 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08005921 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5922 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07005923
Michael Chan16088272006-06-12 22:16:43 -07005924 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5925 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5926 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08005927 bp->flags |= NO_WOL_FLAG;
5928
Michael Chanb6016b72005-05-26 13:03:09 -07005929 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5930 bp->tx_quick_cons_trip_int =
5931 bp->tx_quick_cons_trip;
5932 bp->tx_ticks_int = bp->tx_ticks;
5933 bp->rx_quick_cons_trip_int =
5934 bp->rx_quick_cons_trip;
5935 bp->rx_ticks_int = bp->rx_ticks;
5936 bp->comp_prod_trip_int = bp->comp_prod_trip;
5937 bp->com_ticks_int = bp->com_ticks;
5938 bp->cmd_ticks_int = bp->cmd_ticks;
5939 }
5940
Michael Chanf9317a42006-09-29 17:06:23 -07005941 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5942 *
5943 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5944 * with byte enables disabled on the unused 32-bit word. This is legal
5945 * but causes problems on the AMD 8132 which will eventually stop
5946 * responding after a while.
5947 *
5948 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11005949 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07005950 */
5951 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5952 struct pci_dev *amd_8132 = NULL;
5953
5954 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5955 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5956 amd_8132))) {
5957 u8 rev;
5958
5959 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5960 if (rev >= 0x10 && rev <= 0x13) {
5961 disable_msi = 1;
5962 pci_dev_put(amd_8132);
5963 break;
5964 }
5965 }
5966 }
5967
Michael Chanb6016b72005-05-26 13:03:09 -07005968 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5969 bp->req_line_speed = 0;
5970 if (bp->phy_flags & PHY_SERDES_FLAG) {
5971 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005972
Michael Chane3648b32005-11-04 08:51:21 -08005973 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005974 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5975 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5976 bp->autoneg = 0;
5977 bp->req_line_speed = bp->line_speed = SPEED_1000;
5978 bp->req_duplex = DUPLEX_FULL;
5979 }
Michael Chanb6016b72005-05-26 13:03:09 -07005980 }
5981 else {
5982 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5983 }
5984
5985 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5986
Michael Chancd339a02005-08-25 15:35:24 -07005987 init_timer(&bp->timer);
5988 bp->timer.expires = RUN_AT(bp->timer_interval);
5989 bp->timer.data = (unsigned long) bp;
5990 bp->timer.function = bnx2_timer;
5991
Michael Chanb6016b72005-05-26 13:03:09 -07005992 return 0;
5993
5994err_out_unmap:
5995 if (bp->regview) {
5996 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005997 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005998 }
5999
6000err_out_release:
6001 pci_release_regions(pdev);
6002
6003err_out_disable:
6004 pci_disable_device(pdev);
6005 pci_set_drvdata(pdev, NULL);
6006
6007err_out:
6008 return rc;
6009}
6010
/* PCI probe entry point: allocate the net_device, run board init,
 * wire up net_device operations, register the netdev, and advertise
 * hardware offload features.  Returns 0 or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	/* Print the driver banner only on the first probe. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* Hook up the net_device operations. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	/* On registration failure, undo everything bnx2_init_board did. */
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	/* NOTE(review): the trailing ',' below joins this assignment and the
	 * printk into one comma expression; harmless, but likely meant ';'.
	 */
	bp->name = board_info[ent->driver_data].name,
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	/* Continue the banner line with the station address. */
	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	/* Advertise hardware offload capabilities. */
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	/* Link state is unknown until the PHY reports it. */
	netif_carrier_off(bp->dev);

	return 0;
}
6104
/* PCI remove entry point: tear down in the reverse order of probe.
 * Pending reset work is flushed before the netdev goes away so the
 * work item cannot run against a freed device.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6123
/* PCI suspend handler: quiesce the interface, tell the firmware why
 * we are going down (WOL / no-WOL), free all ring buffers, and drop
 * the chip into the PM state chosen for this suspend.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload/suspend code from the WOL config. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6149
/* PCI resume handler: power the chip back to D0 and, if the interface
 * was running, reinitialize the NIC and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Suspend did nothing if the interface was down; mirror that. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6165
/* PCI driver descriptor: binds bnx2_pci_tbl device IDs to the
 * probe/remove and suspend/resume handlers above.
 */
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
6174
/* Module init: register the PCI driver; probe runs per matching device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6179
/* Module exit: unregister the PCI driver; remove runs per bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6184
/* Register the module entry/exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6187
6188
6189