/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.11"
#define DRV_MODULE_RELDATE	"June 4, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

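/* Indirect register access: the target offset is written to the PCI
 * config window address register and the data is then read or written
 * through the window, serialized by indirect_lock.
 */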
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

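/* Write a value into the on-chip context memory for the given CID.
 * The 5709 uses a write request register that is polled for
 * completion; older chips take a direct address/data write.
 */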
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

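/* MDIO access helpers: auto-polling is temporarily disabled around each
 * PHY register read/write, and the command register is polled until the
 * BUSY bit clears or the access times out with -EBUSY.
 */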
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

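/* Interrupt control helpers: mask or unmask the device interrupt
 * through the PCICFG interrupt acknowledge command register.
 */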
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

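/* Quiesce the interface before reconfiguration: interrupts are masked
 * and the transmit queue and NAPI polling are stopped, then re-enabled
 * by bnx2_netif_start() once intr_sem drops back to zero.
 */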
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

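/* Release the DMA-coherent rings, the status/statistics block and the
 * 5709 context pages allocated by bnx2_alloc_mem().
 */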
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

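/* Allocate the TX/RX descriptor rings, the combined status + statistics
 * block and, on the 5709, the context memory pages.  On any failure the
 * partially allocated resources are freed and -ENOMEM is returned.
 */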
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

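/* Report the current link speed, duplex and autoneg state to the
 * bootcode through the shared memory link status field.  Skipped when
 * the PHY is managed remotely by the firmware.
 */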
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

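/* Resolve the pause (flow control) settings to use, either from the
 * forced configuration or from the local and link-partner autoneg
 * advertisements per Table 28B-3 of the 802.3ab-1999 spec.
 */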
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

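/* On 5709 SerDes, the link status register (BMSR1) lives in the
 * GP_STATUS block, so select that block before reading it and restore
 * COMBO_IEEEB0 afterwards.
 */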
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

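/* 2.5G SerDes helpers: advertise, force or un-force 2500BASE-X on PHYs
 * that are 2.5G capable (5708S/5709S).  The test_and_* variants return
 * nonzero if 2.5G was already being advertised before the call.
 */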
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

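/* Re-evaluate the PHY link state and program the MAC accordingly.
 * Handles loopback and remote-PHY shortcuts, then dispatches to the
 * per-chip SerDes or copper link-up handlers and resolves flow control.
 */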
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED \
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) : \
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

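/* Handle a link event reported by the firmware when the PHY is managed
 * remotely: decode speed, duplex, flow control and port type from the
 * shared memory link status word and reprogram the MAC.
 */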
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			break;
	}
	return 0;
}

Michael Chanb6016b72005-05-26 13:03:09 -07001570static int
1571bnx2_setup_copper_phy(struct bnx2 *bp)
1572{
1573 u32 bmcr;
1574 u32 new_bmcr;
1575
Michael Chanca58c3a2007-05-03 13:22:52 -07001576 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001577
1578 if (bp->autoneg & AUTONEG_SPEED) {
1579 u32 adv_reg, adv1000_reg;
1580 u32 new_adv_reg = 0;
1581 u32 new_adv1000_reg = 0;
1582
Michael Chanca58c3a2007-05-03 13:22:52 -07001583 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001584 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1585 ADVERTISE_PAUSE_ASYM);
1586
1587 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1588 adv1000_reg &= PHY_ALL_1000_SPEED;
1589
1590 if (bp->advertising & ADVERTISED_10baseT_Half)
1591 new_adv_reg |= ADVERTISE_10HALF;
1592 if (bp->advertising & ADVERTISED_10baseT_Full)
1593 new_adv_reg |= ADVERTISE_10FULL;
1594 if (bp->advertising & ADVERTISED_100baseT_Half)
1595 new_adv_reg |= ADVERTISE_100HALF;
1596 if (bp->advertising & ADVERTISED_100baseT_Full)
1597 new_adv_reg |= ADVERTISE_100FULL;
1598 if (bp->advertising & ADVERTISED_1000baseT_Full)
1599 new_adv1000_reg |= ADVERTISE_1000FULL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001600
Michael Chanb6016b72005-05-26 13:03:09 -07001601 new_adv_reg |= ADVERTISE_CSMA;
1602
1603 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1604
1605 if ((adv1000_reg != new_adv1000_reg) ||
1606 (adv_reg != new_adv_reg) ||
1607 ((bmcr & BMCR_ANENABLE) == 0)) {
1608
Michael Chanca58c3a2007-05-03 13:22:52 -07001609 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001610 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
Michael Chanca58c3a2007-05-03 13:22:52 -07001611 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
Michael Chanb6016b72005-05-26 13:03:09 -07001612 BMCR_ANENABLE);
1613 }
1614 else if (bp->link_up) {
1615 /* Flow ctrl may have changed from auto to forced */
1616 /* or vice-versa. */
1617
1618 bnx2_resolve_flow_ctrl(bp);
1619 bnx2_set_mac_link(bp);
1620 }
1621 return 0;
1622 }
1623
1624 new_bmcr = 0;
1625 if (bp->req_line_speed == SPEED_100) {
1626 new_bmcr |= BMCR_SPEED100;
1627 }
1628 if (bp->req_duplex == DUPLEX_FULL) {
1629 new_bmcr |= BMCR_FULLDPLX;
1630 }
1631 if (new_bmcr != bmcr) {
1632 u32 bmsr;
Michael Chanb6016b72005-05-26 13:03:09 -07001633
Michael Chanca58c3a2007-05-03 13:22:52 -07001634 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1635 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001636
Michael Chanb6016b72005-05-26 13:03:09 -07001637 if (bmsr & BMSR_LSTATUS) {
1638 /* Force link down */
Michael Chanca58c3a2007-05-03 13:22:52 -07001639 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chana16dda02006-11-19 14:08:56 -08001640 spin_unlock_bh(&bp->phy_lock);
1641 msleep(50);
1642 spin_lock_bh(&bp->phy_lock);
1643
Michael Chanca58c3a2007-05-03 13:22:52 -07001644 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1645 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
Michael Chanb6016b72005-05-26 13:03:09 -07001646 }
1647
Michael Chanca58c3a2007-05-03 13:22:52 -07001648 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07001649
 1650 		/* Normally, the new speed is set up after the link has
1651 * gone down and up again. In some cases, link will not go
1652 * down so we need to set up the new speed here.
1653 */
1654 if (bmsr & BMSR_LSTATUS) {
1655 bp->line_speed = bp->req_line_speed;
1656 bp->duplex = bp->req_duplex;
1657 bnx2_resolve_flow_ctrl(bp);
1658 bnx2_set_mac_link(bp);
1659 }
Michael Chan27a005b2007-05-03 13:23:41 -07001660 } else {
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001663 }
1664 return 0;
1665}
1666
1667static int
Michael Chan0d8a6572007-07-07 22:49:43 -07001668bnx2_setup_phy(struct bnx2 *bp, u8 port)
Michael Chanb6016b72005-05-26 13:03:09 -07001669{
1670 if (bp->loopback == MAC_LOOPBACK)
1671 return 0;
1672
1673 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07001674 return (bnx2_setup_serdes_phy(bp, port));
Michael Chanb6016b72005-05-26 13:03:09 -07001675 }
1676 else {
1677 return (bnx2_setup_copper_phy(bp));
1678 }
1679}
1680
1681static int
Michael Chan27a005b2007-05-03 13:23:41 -07001682bnx2_init_5709s_phy(struct bnx2 *bp)
1683{
1684 u32 val;
1685
1686 bp->mii_bmcr = MII_BMCR + 0x10;
1687 bp->mii_bmsr = MII_BMSR + 0x10;
1688 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1689 bp->mii_adv = MII_ADVERTISE + 0x10;
1690 bp->mii_lpa = MII_LPA + 0x10;
1691 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1692
1693 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1694 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1695
1696 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1697 bnx2_reset_phy(bp);
1698
1699 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1700
1701 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1702 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1703 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1704 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1705
1706 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1707 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1708 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1709 val |= BCM5708S_UP1_2G5;
1710 else
1711 val &= ~BCM5708S_UP1_2G5;
1712 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1713
1714 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1715 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1716 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1717 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1718
1719 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1720
1721 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1722 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1723 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1724
1725 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1726
1727 return 0;
1728}
1729
1730static int
Michael Chan5b0c76a2005-11-04 08:45:49 -08001731bnx2_init_5708s_phy(struct bnx2 *bp)
1732{
1733 u32 val;
1734
Michael Chan27a005b2007-05-03 13:23:41 -07001735 bnx2_reset_phy(bp);
1736
1737 bp->mii_up1 = BCM5708S_UP1;
1738
Michael Chan5b0c76a2005-11-04 08:45:49 -08001739 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1740 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1741 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1742
1743 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1744 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1745 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1746
1747 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1748 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1749 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1750
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1752 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1753 val |= BCM5708S_UP1_2G5;
1754 bnx2_write_phy(bp, BCM5708S_UP1, val);
1755 }
1756
1757 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
Michael Chandda1e392006-01-23 16:08:14 -08001758 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1759 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001760 /* increase tx signal amplitude */
1761 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1762 BCM5708S_BLK_ADDR_TX_MISC);
1763 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1764 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1765 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1766 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1767 }
1768
Michael Chane3648b32005-11-04 08:51:21 -08001769 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
Michael Chan5b0c76a2005-11-04 08:45:49 -08001770 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1771
1772 if (val) {
1773 u32 is_backplane;
1774
Michael Chane3648b32005-11-04 08:51:21 -08001775 is_backplane = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08001776 BNX2_SHARED_HW_CFG_CONFIG);
1777 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1778 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1779 BCM5708S_BLK_ADDR_TX_MISC);
1780 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1781 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1782 BCM5708S_BLK_ADDR_DIG);
1783 }
1784 }
1785 return 0;
1786}
1787
1788static int
1789bnx2_init_5706s_phy(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001790{
Michael Chan27a005b2007-05-03 13:23:41 -07001791 bnx2_reset_phy(bp);
1792
Michael Chanb6016b72005-05-26 13:03:09 -07001793 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1794
Michael Chan59b47d82006-11-19 14:10:45 -08001795 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1796 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
Michael Chanb6016b72005-05-26 13:03:09 -07001797
1798 if (bp->dev->mtu > 1500) {
1799 u32 val;
1800
1801 /* Set extended packet length bit */
1802 bnx2_write_phy(bp, 0x18, 0x7);
1803 bnx2_read_phy(bp, 0x18, &val);
1804 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1805
1806 bnx2_write_phy(bp, 0x1c, 0x6c00);
1807 bnx2_read_phy(bp, 0x1c, &val);
1808 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1809 }
1810 else {
1811 u32 val;
1812
1813 bnx2_write_phy(bp, 0x18, 0x7);
1814 bnx2_read_phy(bp, 0x18, &val);
1815 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1816
1817 bnx2_write_phy(bp, 0x1c, 0x6c00);
1818 bnx2_read_phy(bp, 0x1c, &val);
1819 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1820 }
1821
1822 return 0;
1823}
1824
1825static int
1826bnx2_init_copper_phy(struct bnx2 *bp)
1827{
Michael Chan5b0c76a2005-11-04 08:45:49 -08001828 u32 val;
1829
Michael Chan27a005b2007-05-03 13:23:41 -07001830 bnx2_reset_phy(bp);
1831
Michael Chanb6016b72005-05-26 13:03:09 -07001832 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1833 bnx2_write_phy(bp, 0x18, 0x0c00);
1834 bnx2_write_phy(bp, 0x17, 0x000a);
1835 bnx2_write_phy(bp, 0x15, 0x310b);
1836 bnx2_write_phy(bp, 0x17, 0x201f);
1837 bnx2_write_phy(bp, 0x15, 0x9506);
1838 bnx2_write_phy(bp, 0x17, 0x401f);
1839 bnx2_write_phy(bp, 0x15, 0x14e2);
1840 bnx2_write_phy(bp, 0x18, 0x0400);
1841 }
1842
Michael Chanb659f442007-02-02 00:46:35 -08001843 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1844 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1845 MII_BNX2_DSP_EXPAND_REG | 0x8);
1846 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1847 val &= ~(1 << 8);
1848 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1849 }
1850
Michael Chanb6016b72005-05-26 13:03:09 -07001851 if (bp->dev->mtu > 1500) {
Michael Chanb6016b72005-05-26 13:03:09 -07001852 /* Set extended packet length bit */
1853 bnx2_write_phy(bp, 0x18, 0x7);
1854 bnx2_read_phy(bp, 0x18, &val);
1855 bnx2_write_phy(bp, 0x18, val | 0x4000);
1856
1857 bnx2_read_phy(bp, 0x10, &val);
1858 bnx2_write_phy(bp, 0x10, val | 0x1);
1859 }
1860 else {
Michael Chanb6016b72005-05-26 13:03:09 -07001861 bnx2_write_phy(bp, 0x18, 0x7);
1862 bnx2_read_phy(bp, 0x18, &val);
1863 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1864
1865 bnx2_read_phy(bp, 0x10, &val);
1866 bnx2_write_phy(bp, 0x10, val & ~0x1);
1867 }
1868
Michael Chan5b0c76a2005-11-04 08:45:49 -08001869 /* ethernet@wirespeed */
1870 bnx2_write_phy(bp, 0x18, 0x7007);
1871 bnx2_read_phy(bp, 0x18, &val);
1872 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
Michael Chanb6016b72005-05-26 13:03:09 -07001873 return 0;
1874}
1875
1876
1877static int
1878bnx2_init_phy(struct bnx2 *bp)
1879{
1880 u32 val;
1881 int rc = 0;
1882
1883 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1884 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1885
Michael Chanca58c3a2007-05-03 13:22:52 -07001886 bp->mii_bmcr = MII_BMCR;
1887 bp->mii_bmsr = MII_BMSR;
Michael Chan27a005b2007-05-03 13:23:41 -07001888 bp->mii_bmsr1 = MII_BMSR;
Michael Chanca58c3a2007-05-03 13:22:52 -07001889 bp->mii_adv = MII_ADVERTISE;
1890 bp->mii_lpa = MII_LPA;
1891
Michael Chanb6016b72005-05-26 13:03:09 -07001892 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1893
Michael Chan0d8a6572007-07-07 22:49:43 -07001894 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1895 goto setup_phy;
1896
Michael Chanb6016b72005-05-26 13:03:09 -07001897 bnx2_read_phy(bp, MII_PHYSID1, &val);
1898 bp->phy_id = val << 16;
1899 bnx2_read_phy(bp, MII_PHYSID2, &val);
1900 bp->phy_id |= val & 0xffff;
1901
1902 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08001903 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1904 rc = bnx2_init_5706s_phy(bp);
1905 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1906 rc = bnx2_init_5708s_phy(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07001907 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1908 rc = bnx2_init_5709s_phy(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07001909 }
1910 else {
1911 rc = bnx2_init_copper_phy(bp);
1912 }
1913
Michael Chan0d8a6572007-07-07 22:49:43 -07001914setup_phy:
1915 if (!rc)
1916 rc = bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07001917
1918 return rc;
1919}
1920
1921static int
1922bnx2_set_mac_loopback(struct bnx2 *bp)
1923{
1924 u32 mac_mode;
1925
1926 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1927 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1928 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1929 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1930 bp->link_up = 1;
1931 return 0;
1932}
1933
Michael Chanbc5a0692006-01-23 16:13:22 -08001934static int bnx2_test_link(struct bnx2 *);
1935
1936static int
1937bnx2_set_phy_loopback(struct bnx2 *bp)
1938{
1939 u32 mac_mode;
1940 int rc, i;
1941
1942 spin_lock_bh(&bp->phy_lock);
Michael Chanca58c3a2007-05-03 13:22:52 -07001943 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
Michael Chanbc5a0692006-01-23 16:13:22 -08001944 BMCR_SPEED1000);
1945 spin_unlock_bh(&bp->phy_lock);
1946 if (rc)
1947 return rc;
1948
1949 for (i = 0; i < 10; i++) {
1950 if (bnx2_test_link(bp) == 0)
1951 break;
Michael Chan80be4432006-11-19 14:07:28 -08001952 msleep(100);
Michael Chanbc5a0692006-01-23 16:13:22 -08001953 }
1954
1955 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1956 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1957 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
Michael Chan59b47d82006-11-19 14:10:45 -08001958 BNX2_EMAC_MODE_25G_MODE);
Michael Chanbc5a0692006-01-23 16:13:22 -08001959
1960 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1961 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1962 bp->link_up = 1;
1963 return 0;
1964}
1965
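/* Driver <-> firmware mailbox handshake used below: the driver ORs a
 * per-message sequence number into the request, writes it to the
 * BNX2_DRV_MB mailbox in shared memory, then polls the BNX2_FW_MB
 * mailbox (up to FW_ACK_TIME_OUT_MS) for an acknowledgement carrying
 * the same sequence number.  On timeout it posts a FW_TIMEOUT code so
 * the firmware knows the exchange was abandoned.
 */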
Michael Chanb6016b72005-05-26 13:03:09 -07001966static int
Michael Chanb090ae22006-01-23 16:07:10 -08001967bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
Michael Chanb6016b72005-05-26 13:03:09 -07001968{
1969 int i;
1970 u32 val;
1971
Michael Chanb6016b72005-05-26 13:03:09 -07001972 bp->fw_wr_seq++;
1973 msg_data |= bp->fw_wr_seq;
1974
Michael Chane3648b32005-11-04 08:51:21 -08001975 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001976
1977 /* wait for an acknowledgement. */
Michael Chanb090ae22006-01-23 16:07:10 -08001978 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1979 msleep(10);
Michael Chanb6016b72005-05-26 13:03:09 -07001980
Michael Chane3648b32005-11-04 08:51:21 -08001981 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
Michael Chanb6016b72005-05-26 13:03:09 -07001982
1983 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1984 break;
1985 }
Michael Chanb090ae22006-01-23 16:07:10 -08001986 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1987 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07001988
1989 /* If we timed out, inform the firmware that this is the case. */
Michael Chanb090ae22006-01-23 16:07:10 -08001990 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1991 if (!silent)
1992 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1993 "%x\n", msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001994
1995 msg_data &= ~BNX2_DRV_MSG_CODE;
1996 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1997
Michael Chane3648b32005-11-04 08:51:21 -08001998 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
Michael Chanb6016b72005-05-26 13:03:09 -07001999
Michael Chanb6016b72005-05-26 13:03:09 -07002000 return -EBUSY;
2001 }
2002
Michael Chanb090ae22006-01-23 16:07:10 -08002003 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2004 return -EIO;
2005
Michael Chanb6016b72005-05-26 13:03:09 -07002006 return 0;
2007}
2008
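/* On the 5709 the connection context lives in host memory rather than
 * on-chip.  The routine below enables the context block with the host
 * page size, waits for MEM_INIT to clear, then programs the DMA address
 * of each host context page into the chip's page table, polling
 * WRITE_REQ after each entry.
 */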
Michael Chan59b47d82006-11-19 14:10:45 -08002009static int
2010bnx2_init_5709_context(struct bnx2 *bp)
2011{
2012 int i, ret = 0;
2013 u32 val;
2014
2015 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2016 val |= (BCM_PAGE_BITS - 8) << 16;
2017 REG_WR(bp, BNX2_CTX_COMMAND, val);
Michael Chan641bdcd2007-06-04 21:22:24 -07002018 for (i = 0; i < 10; i++) {
2019 val = REG_RD(bp, BNX2_CTX_COMMAND);
2020 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2021 break;
2022 udelay(2);
2023 }
2024 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2025 return -EBUSY;
2026
Michael Chan59b47d82006-11-19 14:10:45 -08002027 for (i = 0; i < bp->ctx_pages; i++) {
2028 int j;
2029
2030 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2031 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2032 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2033 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2034 (u64) bp->ctx_blk_mapping[i] >> 32);
2035 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2036 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2037 for (j = 0; j < 10; j++) {
2038
2039 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2040 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2041 break;
2042 udelay(5);
2043 }
2044 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2045 ret = -EBUSY;
2046 break;
2047 }
2048 }
2049 return ret;
2050}
2051
Michael Chanb6016b72005-05-26 13:03:09 -07002052static void
2053bnx2_init_context(struct bnx2 *bp)
2054{
2055 u32 vcid;
2056
2057 vcid = 96;
2058 while (vcid) {
2059 u32 vcid_addr, pcid_addr, offset;
Michael Chan7947b202007-06-04 21:17:10 -07002060 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002061
2062 vcid--;
2063
2064 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2065 u32 new_vcid;
2066
2067 vcid_addr = GET_PCID_ADDR(vcid);
2068 if (vcid & 0x8) {
2069 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2070 }
2071 else {
2072 new_vcid = vcid;
2073 }
2074 pcid_addr = GET_PCID_ADDR(new_vcid);
2075 }
2076 else {
2077 vcid_addr = GET_CID_ADDR(vcid);
2078 pcid_addr = vcid_addr;
2079 }
2080
Michael Chan7947b202007-06-04 21:17:10 -07002081 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2082 vcid_addr += (i << PHY_CTX_SHIFT);
2083 pcid_addr += (i << PHY_CTX_SHIFT);
Michael Chanb6016b72005-05-26 13:03:09 -07002084
Michael Chan7947b202007-06-04 21:17:10 -07002085 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2086 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2087
2088 /* Zero out the context. */
2089 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2090 CTX_WR(bp, 0x00, offset, 0);
2091
2092 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2093 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
Michael Chanb6016b72005-05-26 13:03:09 -07002094 }
Michael Chanb6016b72005-05-26 13:03:09 -07002095 }
2096}
2097
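/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * cluster from the RX buffer unit, remember the good ones (bit 9 of the
 * returned value clear), and free only those back.  The bad clusters
 * stay allocated so the hardware never hands them out.
 */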
2098static int
2099bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2100{
2101 u16 *good_mbuf;
2102 u32 good_mbuf_cnt;
2103 u32 val;
2104
2105 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2106 if (good_mbuf == NULL) {
2107 printk(KERN_ERR PFX "Failed to allocate memory in "
2108 "bnx2_alloc_bad_rbuf\n");
2109 return -ENOMEM;
2110 }
2111
2112 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2113 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2114
2115 good_mbuf_cnt = 0;
2116
2117 /* Allocate a bunch of mbufs and save the good ones in an array. */
2118 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2119 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2120 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2121
2122 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2123
2124 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2125
2126 /* The addresses with Bit 9 set are bad memory blocks. */
2127 if (!(val & (1 << 9))) {
2128 good_mbuf[good_mbuf_cnt] = (u16) val;
2129 good_mbuf_cnt++;
2130 }
2131
2132 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2133 }
2134
2135 /* Free the good ones back to the mbuf pool thus discarding
2136 * all the bad ones. */
2137 while (good_mbuf_cnt) {
2138 good_mbuf_cnt--;
2139
2140 val = good_mbuf[good_mbuf_cnt];
2141 val = (val << 9) | val | 1;
2142
2143 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2144 }
2145 kfree(good_mbuf);
2146 return 0;
2147}
2148
2149static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002150bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07002151{
2152 u32 val;
2153 u8 *mac_addr = bp->dev->dev_addr;
2154
2155 val = (mac_addr[0] << 8) | mac_addr[1];
2156
2157 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2158
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002159 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07002160 (mac_addr[4] << 8) | mac_addr[5];
2161
2162 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2163}
2164
2165static inline int
2166bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2167{
2168 struct sk_buff *skb;
2169 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2170 dma_addr_t mapping;
Michael Chan13daffa2006-03-20 17:49:20 -08002171 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
Michael Chanb6016b72005-05-26 13:03:09 -07002172 unsigned long align;
2173
Michael Chan932f3772006-08-15 01:39:36 -07002174 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
Michael Chanb6016b72005-05-26 13:03:09 -07002175 if (skb == NULL) {
2176 return -ENOMEM;
2177 }
2178
Michael Chan59b47d82006-11-19 14:10:45 -08002179 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2180 skb_reserve(skb, BNX2_RX_ALIGN - align);
Michael Chanb6016b72005-05-26 13:03:09 -07002181
Michael Chanb6016b72005-05-26 13:03:09 -07002182 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2183 PCI_DMA_FROMDEVICE);
2184
2185 rx_buf->skb = skb;
2186 pci_unmap_addr_set(rx_buf, mapping, mapping);
2187
2188 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2189 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2190
2191 bp->rx_prod_bseq += bp->rx_buf_use_size;
2192
2193 return 0;
2194}
2195
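/* The status block carries the raw attention bits and an acknowledged
 * copy of those bits.  An event is pending when the two copies differ
 * for the given bit; the helper below acknowledges it by setting or
 * clearing the bit through the PCICFG status-bit command registers.
 */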
Michael Chanda3e4fb2007-05-03 13:24:23 -07002196static int
2197bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2198{
2199 struct status_block *sblk = bp->status_blk;
2200 u32 new_link_state, old_link_state;
2201 int is_set = 1;
2202
2203 new_link_state = sblk->status_attn_bits & event;
2204 old_link_state = sblk->status_attn_bits_ack & event;
2205 if (new_link_state != old_link_state) {
2206 if (new_link_state)
2207 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2208 else
2209 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2210 } else
2211 is_set = 0;
2212
2213 return is_set;
2214}
2215
Michael Chanb6016b72005-05-26 13:03:09 -07002216static void
2217bnx2_phy_int(struct bnx2 *bp)
2218{
Michael Chanda3e4fb2007-05-03 13:24:23 -07002219 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2220 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002221 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002222 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002223 }
Michael Chan0d8a6572007-07-07 22:49:43 -07002224 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2225 bnx2_set_remote_link(bp);
2226
Michael Chanb6016b72005-05-26 13:03:09 -07002227}
2228
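/* TX completion: walk the ring from the software consumer index up to
 * the hardware consumer index reported in the status block, unmapping
 * and freeing each completed skb.  Indices that land on the last entry
 * of a ring page are skipped because that slot holds the next-page
 * chain pointer rather than a packet descriptor.
 */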
2229static void
2230bnx2_tx_int(struct bnx2 *bp)
2231{
Michael Chanf4e418f2005-11-04 08:53:48 -08002232 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002233 u16 hw_cons, sw_cons, sw_ring_cons;
2234 int tx_free_bd = 0;
2235
Michael Chanf4e418f2005-11-04 08:53:48 -08002236 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002237 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2238 hw_cons++;
2239 }
2240 sw_cons = bp->tx_cons;
2241
2242 while (sw_cons != hw_cons) {
2243 struct sw_bd *tx_buf;
2244 struct sk_buff *skb;
2245 int i, last;
2246
2247 sw_ring_cons = TX_RING_IDX(sw_cons);
2248
2249 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2250 skb = tx_buf->skb;
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002251
Michael Chanb6016b72005-05-26 13:03:09 -07002252 /* partial BD completions possible with TSO packets */
Herbert Xu89114af2006-07-08 13:34:32 -07002253 if (skb_is_gso(skb)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002254 u16 last_idx, last_ring_idx;
2255
2256 last_idx = sw_cons +
2257 skb_shinfo(skb)->nr_frags + 1;
2258 last_ring_idx = sw_ring_cons +
2259 skb_shinfo(skb)->nr_frags + 1;
2260 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2261 last_idx++;
2262 }
2263 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2264 break;
2265 }
2266 }
Arjan van de Ven1d39ed52006-12-12 14:06:23 +01002267
Michael Chanb6016b72005-05-26 13:03:09 -07002268 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2269 skb_headlen(skb), PCI_DMA_TODEVICE);
2270
2271 tx_buf->skb = NULL;
2272 last = skb_shinfo(skb)->nr_frags;
2273
2274 for (i = 0; i < last; i++) {
2275 sw_cons = NEXT_TX_BD(sw_cons);
2276
2277 pci_unmap_page(bp->pdev,
2278 pci_unmap_addr(
2279 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2280 mapping),
2281 skb_shinfo(skb)->frags[i].size,
2282 PCI_DMA_TODEVICE);
2283 }
2284
2285 sw_cons = NEXT_TX_BD(sw_cons);
2286
2287 tx_free_bd += last + 1;
2288
Michael Chan745720e2006-06-29 12:37:41 -07002289 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002290
Michael Chanf4e418f2005-11-04 08:53:48 -08002291 hw_cons = bp->hw_tx_cons =
2292 sblk->status_tx_quick_consumer_index0;
2293
Michael Chanb6016b72005-05-26 13:03:09 -07002294 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2295 hw_cons++;
2296 }
2297 }
2298
Michael Chane89bbf12005-08-25 15:36:58 -07002299 bp->tx_cons = sw_cons;
Michael Chan2f8af122006-08-15 01:39:10 -07002300 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2301 * before checking for netif_queue_stopped(). Without the
2302 * memory barrier, there is a small possibility that bnx2_start_xmit()
2303 * will miss it and cause the queue to be stopped forever.
2304 */
2305 smp_mb();
Michael Chanb6016b72005-05-26 13:03:09 -07002306
Michael Chan2f8af122006-08-15 01:39:10 -07002307 if (unlikely(netif_queue_stopped(bp->dev)) &&
2308 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2309 netif_tx_lock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002310 if ((netif_queue_stopped(bp->dev)) &&
Michael Chan2f8af122006-08-15 01:39:10 -07002311 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
Michael Chanb6016b72005-05-26 13:03:09 -07002312 netif_wake_queue(bp->dev);
Michael Chan2f8af122006-08-15 01:39:10 -07002313 netif_tx_unlock(bp->dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002314 }
Michael Chanb6016b72005-05-26 13:03:09 -07002315}
2316
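/* Recycle an RX skb in place: the skb and its DMA mapping are moved
 * from the consumer slot back to the producer slot (the buffer
 * descriptor address is copied along with it), so the ring stays full
 * even when a replacement skb cannot be allocated or the packet was
 * copied out.
 */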
2317static inline void
2318bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2319 u16 cons, u16 prod)
2320{
Michael Chan236b6392006-03-20 17:49:02 -08002321 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2322 struct rx_bd *cons_bd, *prod_bd;
2323
2324 cons_rx_buf = &bp->rx_buf_ring[cons];
2325 prod_rx_buf = &bp->rx_buf_ring[prod];
Michael Chanb6016b72005-05-26 13:03:09 -07002326
2327 pci_dma_sync_single_for_device(bp->pdev,
2328 pci_unmap_addr(cons_rx_buf, mapping),
2329 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2330
Michael Chan236b6392006-03-20 17:49:02 -08002331 bp->rx_prod_bseq += bp->rx_buf_use_size;
2332
2333 prod_rx_buf->skb = skb;
2334
2335 if (cons == prod)
2336 return;
2337
Michael Chanb6016b72005-05-26 13:03:09 -07002338 pci_unmap_addr_set(prod_rx_buf, mapping,
2339 pci_unmap_addr(cons_rx_buf, mapping));
2340
Michael Chan3fdfcc22006-03-20 17:49:49 -08002341 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2342 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
Michael Chan236b6392006-03-20 17:49:02 -08002343 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2344 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
Michael Chanb6016b72005-05-26 13:03:09 -07002345}
2346
2347static int
2348bnx2_rx_int(struct bnx2 *bp, int budget)
2349{
Michael Chanf4e418f2005-11-04 08:53:48 -08002350 struct status_block *sblk = bp->status_blk;
Michael Chanb6016b72005-05-26 13:03:09 -07002351 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2352 struct l2_fhdr *rx_hdr;
2353 int rx_pkt = 0;
2354
Michael Chanf4e418f2005-11-04 08:53:48 -08002355 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
Michael Chanb6016b72005-05-26 13:03:09 -07002356 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2357 hw_cons++;
2358 }
2359 sw_cons = bp->rx_cons;
2360 sw_prod = bp->rx_prod;
2361
2362 /* Memory barrier necessary as speculative reads of the rx
2363 * buffer can be ahead of the index in the status block
2364 */
2365 rmb();
2366 while (sw_cons != hw_cons) {
2367 unsigned int len;
Michael Chanade2bfe2006-01-23 16:09:51 -08002368 u32 status;
Michael Chanb6016b72005-05-26 13:03:09 -07002369 struct sw_bd *rx_buf;
2370 struct sk_buff *skb;
Michael Chan236b6392006-03-20 17:49:02 -08002371 dma_addr_t dma_addr;
Michael Chanb6016b72005-05-26 13:03:09 -07002372
2373 sw_ring_cons = RX_RING_IDX(sw_cons);
2374 sw_ring_prod = RX_RING_IDX(sw_prod);
2375
2376 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2377 skb = rx_buf->skb;
Michael Chan236b6392006-03-20 17:49:02 -08002378
2379 rx_buf->skb = NULL;
2380
2381 dma_addr = pci_unmap_addr(rx_buf, mapping);
2382
2383 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002384 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2385
2386 rx_hdr = (struct l2_fhdr *) skb->data;
2387 len = rx_hdr->l2_fhdr_pkt_len - 4;
2388
Michael Chanade2bfe2006-01-23 16:09:51 -08002389 if ((status = rx_hdr->l2_fhdr_status) &
Michael Chanb6016b72005-05-26 13:03:09 -07002390 (L2_FHDR_ERRORS_BAD_CRC |
2391 L2_FHDR_ERRORS_PHY_DECODE |
2392 L2_FHDR_ERRORS_ALIGNMENT |
2393 L2_FHDR_ERRORS_TOO_SHORT |
2394 L2_FHDR_ERRORS_GIANT_FRAME)) {
2395
2396 goto reuse_rx;
2397 }
2398
2399 /* Since we don't have a jumbo ring, copy small packets
2400 * if mtu > 1500
2401 */
2402 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2403 struct sk_buff *new_skb;
2404
Michael Chan932f3772006-08-15 01:39:36 -07002405 new_skb = netdev_alloc_skb(bp->dev, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002406 if (new_skb == NULL)
2407 goto reuse_rx;
2408
2409 /* aligned copy */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002410 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2411 new_skb->data, len + 2);
Michael Chanb6016b72005-05-26 13:03:09 -07002412 skb_reserve(new_skb, 2);
2413 skb_put(new_skb, len);
Michael Chanb6016b72005-05-26 13:03:09 -07002414
2415 bnx2_reuse_rx_skb(bp, skb,
2416 sw_ring_cons, sw_ring_prod);
2417
2418 skb = new_skb;
2419 }
2420 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
Michael Chan236b6392006-03-20 17:49:02 -08002421 pci_unmap_single(bp->pdev, dma_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07002422 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2423
2424 skb_reserve(skb, bp->rx_offset);
2425 skb_put(skb, len);
2426 }
2427 else {
2428reuse_rx:
2429 bnx2_reuse_rx_skb(bp, skb,
2430 sw_ring_cons, sw_ring_prod);
2431 goto next_rx;
2432 }
2433
2434 skb->protocol = eth_type_trans(skb, bp->dev);
2435
2436 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
Alexey Dobriyand1e100b2006-06-11 20:57:17 -07002437 (ntohs(skb->protocol) != 0x8100)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002438
Michael Chan745720e2006-06-29 12:37:41 -07002439 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07002440 goto next_rx;
2441
2442 }
2443
Michael Chanb6016b72005-05-26 13:03:09 -07002444 skb->ip_summed = CHECKSUM_NONE;
2445 if (bp->rx_csum &&
2446 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2447 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2448
Michael Chanade2bfe2006-01-23 16:09:51 -08002449 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2450 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
Michael Chanb6016b72005-05-26 13:03:09 -07002451 skb->ip_summed = CHECKSUM_UNNECESSARY;
2452 }
2453
2454#ifdef BCM_VLAN
2455 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2456 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2457 rx_hdr->l2_fhdr_vlan_tag);
2458 }
2459 else
2460#endif
2461 netif_receive_skb(skb);
2462
2463 bp->dev->last_rx = jiffies;
2464 rx_pkt++;
2465
2466next_rx:
Michael Chanb6016b72005-05-26 13:03:09 -07002467 sw_cons = NEXT_RX_BD(sw_cons);
2468 sw_prod = NEXT_RX_BD(sw_prod);
2469
2470 if ((rx_pkt == budget))
2471 break;
Michael Chanf4e418f2005-11-04 08:53:48 -08002472
2473 /* Refresh hw_cons to see if there is new work */
2474 if (sw_cons == hw_cons) {
2475 hw_cons = bp->hw_rx_cons =
2476 sblk->status_rx_quick_consumer_index0;
2477 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2478 hw_cons++;
2479 rmb();
2480 }
Michael Chanb6016b72005-05-26 13:03:09 -07002481 }
2482 bp->rx_cons = sw_cons;
2483 bp->rx_prod = sw_prod;
2484
2485 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2486
2487 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2488
2489 mmiowb();
2490
2491 return rx_pkt;
2492
2493}
2494
2495/* MSI ISR - The only difference between this and the INTx ISR
2496 * is that the MSI interrupt is always serviced.
2497 */
2498static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002499bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002500{
2501 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002502 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002503
Michael Chanc921e4c2005-09-08 13:15:32 -07002504 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002505 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2506 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2507 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2508
2509 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002510 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2511 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002512
Michael Chan73eef4c2005-08-25 15:39:15 -07002513 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002514
Michael Chan73eef4c2005-08-25 15:39:15 -07002515 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002516}
2517
2518static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002519bnx2_msi_1shot(int irq, void *dev_instance)
2520{
2521 struct net_device *dev = dev_instance;
2522 struct bnx2 *bp = netdev_priv(dev);
2523
2524 prefetch(bp->status_blk);
2525
2526 /* Return here if interrupt is disabled. */
2527 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2528 return IRQ_HANDLED;
2529
2530 netif_rx_schedule(dev);
2531
2532 return IRQ_HANDLED;
2533}
2534
2535static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002536bnx2_interrupt(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002537{
2538 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002539 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002540
2541 /* When using INTx, it is possible for the interrupt to arrive
 2542 * at the CPU before the status block write posted prior to the
2543 * interrupt. Reading a register will flush the status block.
2544 * When using MSI, the MSI message will always complete after
2545 * the status block write.
2546 */
Michael Chanc921e4c2005-09-08 13:15:32 -07002547 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
Michael Chanb6016b72005-05-26 13:03:09 -07002548 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2549 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
Michael Chan73eef4c2005-08-25 15:39:15 -07002550 return IRQ_NONE;
Michael Chanb6016b72005-05-26 13:03:09 -07002551
2552 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2553 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2554 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2555
2556 /* Return here if interrupt is shared and is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002557 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2558 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002559
Michael Chan73eef4c2005-08-25 15:39:15 -07002560 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002561
Michael Chan73eef4c2005-08-25 15:39:15 -07002562 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002563}
2564
Michael Chan0d8a6572007-07-07 22:49:43 -07002565#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2566 STATUS_ATTN_BITS_TIMER_ABORT)
Michael Chanda3e4fb2007-05-03 13:24:23 -07002567
Michael Chanf4e418f2005-11-04 08:53:48 -08002568static inline int
2569bnx2_has_work(struct bnx2 *bp)
2570{
2571 struct status_block *sblk = bp->status_blk;
2572
2573 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2574 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2575 return 1;
2576
Michael Chanda3e4fb2007-05-03 13:24:23 -07002577 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2578 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002579 return 1;
2580
2581 return 0;
2582}
2583
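/* NAPI poll: handle PHY/attention events first (forcing a coalesce-now
 * command so transient link states are not missed), then service the
 * TX and RX rings against the budget, and re-enable interrupts only
 * once bnx2_has_work() reports the status block fully processed.
 */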
Michael Chanb6016b72005-05-26 13:03:09 -07002584static int
2585bnx2_poll(struct net_device *dev, int *budget)
2586{
Michael Chan972ec0d2006-01-23 16:12:43 -08002587 struct bnx2 *bp = netdev_priv(dev);
Michael Chanda3e4fb2007-05-03 13:24:23 -07002588 struct status_block *sblk = bp->status_blk;
2589 u32 status_attn_bits = sblk->status_attn_bits;
2590 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
Michael Chanb6016b72005-05-26 13:03:09 -07002591
Michael Chanda3e4fb2007-05-03 13:24:23 -07002592 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2593 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002594
Michael Chanb6016b72005-05-26 13:03:09 -07002595 bnx2_phy_int(bp);
Michael Chanbf5295b2006-03-23 01:11:56 -08002596
2597 /* This is needed to take care of transient status
2598 * during link changes.
2599 */
2600 REG_WR(bp, BNX2_HC_COMMAND,
2601 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2602 REG_RD(bp, BNX2_HC_COMMAND);
Michael Chanb6016b72005-05-26 13:03:09 -07002603 }
2604
Michael Chanf4e418f2005-11-04 08:53:48 -08002605 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
Michael Chanb6016b72005-05-26 13:03:09 -07002606 bnx2_tx_int(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07002607
Michael Chanf4e418f2005-11-04 08:53:48 -08002608 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
Michael Chanb6016b72005-05-26 13:03:09 -07002609 int orig_budget = *budget;
2610 int work_done;
2611
2612 if (orig_budget > dev->quota)
2613 orig_budget = dev->quota;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002614
Michael Chanb6016b72005-05-26 13:03:09 -07002615 work_done = bnx2_rx_int(bp, orig_budget);
2616 *budget -= work_done;
2617 dev->quota -= work_done;
Michael Chanb6016b72005-05-26 13:03:09 -07002618 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002619
Michael Chanf4e418f2005-11-04 08:53:48 -08002620 bp->last_status_idx = bp->status_blk->status_idx;
2621 rmb();
2622
2623 if (!bnx2_has_work(bp)) {
Michael Chanb6016b72005-05-26 13:03:09 -07002624 netif_rx_complete(dev);
Michael Chan1269a8a2006-01-23 16:11:03 -08002625 if (likely(bp->flags & USING_MSI_FLAG)) {
2626 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2627 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2628 bp->last_status_idx);
2629 return 0;
2630 }
Michael Chanb6016b72005-05-26 13:03:09 -07002631 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
Michael Chan1269a8a2006-01-23 16:11:03 -08002632 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2633 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2634 bp->last_status_idx);
2635
2636 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2637 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2638 bp->last_status_idx);
Michael Chanb6016b72005-05-26 13:03:09 -07002639 return 0;
2640 }
2641
2642 return 1;
2643}
2644
Herbert Xu932ff272006-06-09 12:20:56 -07002645/* Called with rtnl_lock from vlan functions and also netif_tx_lock
Michael Chanb6016b72005-05-26 13:03:09 -07002646 * from set_multicast.
2647 */
2648static void
2649bnx2_set_rx_mode(struct net_device *dev)
2650{
Michael Chan972ec0d2006-01-23 16:12:43 -08002651 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002652 u32 rx_mode, sort_mode;
2653 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07002654
Michael Chanc770a652005-08-25 15:38:39 -07002655 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002656
2657 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2658 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2659 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2660#ifdef BCM_VLAN
Michael Chane29054f2006-01-23 16:06:06 -08002661 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
Michael Chanb6016b72005-05-26 13:03:09 -07002662 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002663#else
Michael Chane29054f2006-01-23 16:06:06 -08002664 if (!(bp->flags & ASF_ENABLE_FLAG))
2665 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
Michael Chanb6016b72005-05-26 13:03:09 -07002666#endif
2667 if (dev->flags & IFF_PROMISC) {
2668 /* Promiscuous mode. */
2669 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
Michael Chan75108732006-11-19 14:06:40 -08002670 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2671 BNX2_RPM_SORT_USER0_PROM_VLAN;
Michael Chanb6016b72005-05-26 13:03:09 -07002672 }
2673 else if (dev->flags & IFF_ALLMULTI) {
2674 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2675 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2676 0xffffffff);
2677 }
2678 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2679 }
2680 else {
2681 /* Accept one or more multicast(s). */
2682 struct dev_mc_list *mclist;
2683 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2684 u32 regidx;
2685 u32 bit;
2686 u32 crc;
2687
2688 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2689
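		/* 256-bin hash filter: the low byte of the little-endian
		 * CRC of the address picks the bin; bits 7:5 select one of
		 * the eight hash registers and bits 4:0 the bit within it.
		 */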
2690 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2691 i++, mclist = mclist->next) {
2692
2693 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2694 bit = crc & 0xff;
2695 regidx = (bit & 0xe0) >> 5;
2696 bit &= 0x1f;
2697 mc_filter[regidx] |= (1 << bit);
2698 }
2699
2700 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2701 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2702 mc_filter[i]);
2703 }
2704
2705 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2706 }
2707
2708 if (rx_mode != bp->rx_mode) {
2709 bp->rx_mode = rx_mode;
2710 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2711 }
2712
2713 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2714 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2715 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2716
Michael Chanc770a652005-08-25 15:38:39 -07002717 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07002718}
2719
Michael Chanfba9fe92006-06-12 22:21:25 -07002720#define FW_BUF_SIZE 0x8000
2721
2722static int
2723bnx2_gunzip_init(struct bnx2 *bp)
2724{
2725 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2726 goto gunzip_nomem1;
2727
2728 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2729 goto gunzip_nomem2;
2730
2731 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2732 if (bp->strm->workspace == NULL)
2733 goto gunzip_nomem3;
2734
2735 return 0;
2736
2737gunzip_nomem3:
2738 kfree(bp->strm);
2739 bp->strm = NULL;
2740
2741gunzip_nomem2:
2742 vfree(bp->gunzip_buf);
2743 bp->gunzip_buf = NULL;
2744
2745gunzip_nomem1:
2746 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2747 "uncompression.\n", bp->dev->name);
2748 return -ENOMEM;
2749}
2750
2751static void
2752bnx2_gunzip_end(struct bnx2 *bp)
2753{
2754 kfree(bp->strm->workspace);
2755
2756 kfree(bp->strm);
2757 bp->strm = NULL;
2758
2759 if (bp->gunzip_buf) {
2760 vfree(bp->gunzip_buf);
2761 bp->gunzip_buf = NULL;
2762 }
2763}
2764
2765static int
2766bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2767{
2768 int n, rc;
2769
2770 /* check gzip header */
2771 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2772 return -EINVAL;
2773
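	/* Skip the fixed 10-byte gzip header (0x1f 0x8b magic plus deflate
	 * method, verified above) and, when the FNAME flag is set, the
	 * NUL-terminated file name that follows.  What remains is a raw
	 * deflate stream, inflated below with negative window bits so zlib
	 * expects no wrapper.
	 */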
2774 n = 10;
2775
2776#define FNAME 0x8
2777 if (zbuf[3] & FNAME)
2778 while ((zbuf[n++] != 0) && (n < len));
2779
2780 bp->strm->next_in = zbuf + n;
2781 bp->strm->avail_in = len - n;
2782 bp->strm->next_out = bp->gunzip_buf;
2783 bp->strm->avail_out = FW_BUF_SIZE;
2784
2785 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2786 if (rc != Z_OK)
2787 return rc;
2788
2789 rc = zlib_inflate(bp->strm, Z_FINISH);
2790
2791 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2792 *outbuf = bp->gunzip_buf;
2793
2794 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2795 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2796 bp->dev->name, bp->strm->msg);
2797
2798 zlib_inflateEnd(bp->strm);
2799
2800 if (rc == Z_STREAM_END)
2801 return 0;
2802
2803 return rc;
2804}
2805
Michael Chanb6016b72005-05-26 13:03:09 -07002806static void
2807load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2808 u32 rv2p_proc)
2809{
2810 int i;
2811 u32 val;
2812
2813
2814 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002815 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002816 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002817 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002818 rv2p_code++;
2819
2820 if (rv2p_proc == RV2P_PROC1) {
2821 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2822 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2823 }
2824 else {
2825 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2826 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2827 }
2828 }
2829
2830 /* Reset the processor, un-stall is done later. */
2831 if (rv2p_proc == RV2P_PROC1) {
2832 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2833 }
2834 else {
2835 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2836 }
2837}
2838
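/* Generic loader for the on-chip RISC processors (RXP, TXP, TPAT, COM,
 * and the CP on the 5709): halt the CPU, copy each firmware section
 * into its scratchpad window (the text section is stored
 * gzip-compressed and inflated first), point the program counter at the
 * entry address and release the halt.
 */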
Michael Chanaf3ee512006-11-19 14:09:25 -08002839static int
Michael Chanb6016b72005-05-26 13:03:09 -07002840load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2841{
2842 u32 offset;
2843 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002844 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002845
2846 /* Halt the CPU. */
2847 val = REG_RD_IND(bp, cpu_reg->mode);
2848 val |= cpu_reg->mode_value_halt;
2849 REG_WR_IND(bp, cpu_reg->mode, val);
2850 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2851
2852 /* Load the Text area. */
2853 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002854 if (fw->gz_text) {
2855 u32 text_len;
2856 void *text;
2857
2858 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2859 &text_len);
2860 if (rc)
2861 return rc;
2862
2863 fw->text = text;
2864 }
2865 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002866 int j;
2867
2868 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002869 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002870 }
2871 }
2872
2873 /* Load the Data area. */
2874 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2875 if (fw->data) {
2876 int j;
2877
2878 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2879 REG_WR_IND(bp, offset, fw->data[j]);
2880 }
2881 }
2882
2883 /* Load the SBSS area. */
2884 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2885 if (fw->sbss) {
2886 int j;
2887
2888 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2889 REG_WR_IND(bp, offset, fw->sbss[j]);
2890 }
2891 }
2892
2893 /* Load the BSS area. */
2894 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2895 if (fw->bss) {
2896 int j;
2897
2898 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2899 REG_WR_IND(bp, offset, fw->bss[j]);
2900 }
2901 }
2902
2903 /* Load the Read-Only area. */
2904 offset = cpu_reg->spad_base +
2905 (fw->rodata_addr - cpu_reg->mips_view_base);
2906 if (fw->rodata) {
2907 int j;
2908
2909 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2910 REG_WR_IND(bp, offset, fw->rodata[j]);
2911 }
2912 }
2913
2914 /* Clear the pre-fetch instruction. */
2915 REG_WR_IND(bp, cpu_reg->inst, 0);
2916 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2917
2918 /* Start the CPU. */
2919 val = REG_RD_IND(bp, cpu_reg->mode);
2920 val &= ~cpu_reg->mode_value_halt;
2921 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2922 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002923
2924 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002925}
2926
Michael Chanfba9fe92006-06-12 22:21:25 -07002927static int
Michael Chanb6016b72005-05-26 13:03:09 -07002928bnx2_init_cpus(struct bnx2 *bp)
2929{
2930 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002931 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002932 int rc = 0;
2933 void *text;
2934 u32 text_len;
2935
2936 if ((rc = bnx2_gunzip_init(bp)) != 0)
2937 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002938
2939 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002940 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2941 &text_len);
2942 if (rc)
2943 goto init_cpu_err;
2944
2945 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2946
2947 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2948 &text_len);
2949 if (rc)
2950 goto init_cpu_err;
2951
2952 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002953
2954 /* Initialize the RX Processor. */
2955 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2956 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2957 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2958 cpu_reg.state = BNX2_RXP_CPU_STATE;
2959 cpu_reg.state_value_clear = 0xffffff;
2960 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2961 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2962 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2963 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2964 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2965 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2966 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002967
Michael Chand43584c2006-11-19 14:14:35 -08002968 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2969 fw = &bnx2_rxp_fw_09;
2970 else
2971 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002972
Michael Chanaf3ee512006-11-19 14:09:25 -08002973 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002974 if (rc)
2975 goto init_cpu_err;
2976
Michael Chanb6016b72005-05-26 13:03:09 -07002977 /* Initialize the TX Processor. */
2978 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2979 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2980 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2981 cpu_reg.state = BNX2_TXP_CPU_STATE;
2982 cpu_reg.state_value_clear = 0xffffff;
2983 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2984 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2985 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2986 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2987 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2988 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2989 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002990
Michael Chand43584c2006-11-19 14:14:35 -08002991 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2992 fw = &bnx2_txp_fw_09;
2993 else
2994 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002995
Michael Chanaf3ee512006-11-19 14:09:25 -08002996 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002997 if (rc)
2998 goto init_cpu_err;
2999
Michael Chanb6016b72005-05-26 13:03:09 -07003000 /* Initialize the TX Patch-up Processor. */
3001 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3002 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3003 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3004 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3005 cpu_reg.state_value_clear = 0xffffff;
3006 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3007 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3008 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3009 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3010 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3011 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3012 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003013
Michael Chand43584c2006-11-19 14:14:35 -08003014 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015 fw = &bnx2_tpat_fw_09;
3016 else
3017 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003018
Michael Chanaf3ee512006-11-19 14:09:25 -08003019 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003020 if (rc)
3021 goto init_cpu_err;
3022
Michael Chanb6016b72005-05-26 13:03:09 -07003023 /* Initialize the Completion Processor. */
3024 cpu_reg.mode = BNX2_COM_CPU_MODE;
3025 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3026 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3027 cpu_reg.state = BNX2_COM_CPU_STATE;
3028 cpu_reg.state_value_clear = 0xffffff;
3029 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3030 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3031 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3032 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3033 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3034 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3035 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003036
Michael Chand43584c2006-11-19 14:14:35 -08003037 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3038 fw = &bnx2_com_fw_09;
3039 else
3040 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07003041
Michael Chanaf3ee512006-11-19 14:09:25 -08003042 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07003043 if (rc)
3044 goto init_cpu_err;
3045
Michael Chand43584c2006-11-19 14:14:35 -08003046 /* Initialize the Command Processor. */
3047 cpu_reg.mode = BNX2_CP_CPU_MODE;
3048 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3049 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3050 cpu_reg.state = BNX2_CP_CPU_STATE;
3051 cpu_reg.state_value_clear = 0xffffff;
3052 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3053 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3054 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3055 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3056 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3057 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3058 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07003059
Michael Chand43584c2006-11-19 14:14:35 -08003060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3061 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07003062
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08003063 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08003064 if (rc)
3065 goto init_cpu_err;
3066 }
Michael Chanfba9fe92006-06-12 22:21:25 -07003067init_cpu_err:
3068 bnx2_gunzip_end(bp);
3069 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003070}
3071
3072static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07003073bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07003074{
3075 u16 pmcsr;
3076
3077 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3078
3079 switch (state) {
Pavel Machek829ca9a2005-09-03 15:56:56 -07003080 case PCI_D0: {
Michael Chanb6016b72005-05-26 13:03:09 -07003081 u32 val;
3082
3083 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3084 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3085 PCI_PM_CTRL_PME_STATUS);
3086
3087 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3088 /* delay required during transition out of D3hot */
3089 msleep(20);
3090
3091 val = REG_RD(bp, BNX2_EMAC_MODE);
3092 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3093 val &= ~BNX2_EMAC_MODE_MPKT;
3094 REG_WR(bp, BNX2_EMAC_MODE, val);
3095
3096 val = REG_RD(bp, BNX2_RPM_CONFIG);
3097 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3098 REG_WR(bp, BNX2_RPM_CONFIG, val);
3099 break;
3100 }
Pavel Machek829ca9a2005-09-03 15:56:56 -07003101 case PCI_D3hot: {
Michael Chanb6016b72005-05-26 13:03:09 -07003102 int i;
3103 u32 val, wol_msg;
3104
3105 if (bp->wol) {
3106 u32 advertising;
3107 u8 autoneg;
3108
3109 autoneg = bp->autoneg;
3110 advertising = bp->advertising;
3111
3112 bp->autoneg = AUTONEG_SPEED;
3113 bp->advertising = ADVERTISED_10baseT_Half |
3114 ADVERTISED_10baseT_Full |
3115 ADVERTISED_100baseT_Half |
3116 ADVERTISED_100baseT_Full |
3117 ADVERTISED_Autoneg;
3118
3119 bnx2_setup_copper_phy(bp);
3120
3121 bp->autoneg = autoneg;
3122 bp->advertising = advertising;
3123
3124 bnx2_set_mac_addr(bp);
3125
3126 val = REG_RD(bp, BNX2_EMAC_MODE);
3127
3128 /* Enable port mode. */
3129 val &= ~BNX2_EMAC_MODE_PORT;
3130 val |= BNX2_EMAC_MODE_PORT_MII |
3131 BNX2_EMAC_MODE_MPKT_RCVD |
3132 BNX2_EMAC_MODE_ACPI_RCVD |
Michael Chanb6016b72005-05-26 13:03:09 -07003133 BNX2_EMAC_MODE_MPKT;
3134
3135 REG_WR(bp, BNX2_EMAC_MODE, val);
3136
3137 /* receive all multicast */
3138 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3139 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3140 0xffffffff);
3141 }
3142 REG_WR(bp, BNX2_EMAC_RX_MODE,
3143 BNX2_EMAC_RX_MODE_SORT_MODE);
3144
3145 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3146 BNX2_RPM_SORT_USER0_MC_EN;
3147 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3148 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3149 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3150 BNX2_RPM_SORT_USER0_ENA);
3151
3152 /* Need to enable EMAC and RPM for WOL. */
3153 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3154 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3155 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3156 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3157
3158 val = REG_RD(bp, BNX2_RPM_CONFIG);
3159 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3160 REG_WR(bp, BNX2_RPM_CONFIG, val);
3161
3162 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3163 }
3164 else {
3165 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3166 }
3167
Michael Chandda1e392006-01-23 16:08:14 -08003168 if (!(bp->flags & NO_WOL_FLAG))
3169 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003170
3171 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3172 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3173 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3174
3175 if (bp->wol)
3176 pmcsr |= 3;
3177 }
3178 else {
3179 pmcsr |= 3;
3180 }
3181 if (bp->wol) {
3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3183 }
3184 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3185 pmcsr);
3186
3187 /* No more memory access after this point until
 3188 		 * the device is brought back to D0.
3189 */
3190 udelay(50);
3191 break;
3192 }
3193 default:
3194 return -EINVAL;
3195 }
3196 return 0;
3197}
3198
3199static int
3200bnx2_acquire_nvram_lock(struct bnx2 *bp)
3201{
3202 u32 val;
3203 int j;
3204
3205 /* Request access to the flash interface. */
3206 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3207 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3208 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3209 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3210 break;
3211
3212 udelay(5);
3213 }
3214
3215 if (j >= NVRAM_TIMEOUT_COUNT)
3216 return -EBUSY;
3217
3218 return 0;
3219}
3220
3221static int
3222bnx2_release_nvram_lock(struct bnx2 *bp)
3223{
3224 int j;
3225 u32 val;
3226
3227 /* Relinquish nvram interface. */
3228 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3229
3230 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3231 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3232 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3233 break;
3234
3235 udelay(5);
3236 }
3237
3238 if (j >= NVRAM_TIMEOUT_COUNT)
3239 return -EBUSY;
3240
3241 return 0;
3242}
3243
3244
3245static int
3246bnx2_enable_nvram_write(struct bnx2 *bp)
3247{
3248 u32 val;
3249
3250 val = REG_RD(bp, BNX2_MISC_CFG);
3251 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3252
3253 if (!bp->flash_info->buffered) {
3254 int j;
3255
3256 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3257 REG_WR(bp, BNX2_NVM_COMMAND,
3258 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3259
3260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3261 udelay(5);
3262
3263 val = REG_RD(bp, BNX2_NVM_COMMAND);
3264 if (val & BNX2_NVM_COMMAND_DONE)
3265 break;
3266 }
3267
3268 if (j >= NVRAM_TIMEOUT_COUNT)
3269 return -EBUSY;
3270 }
3271 return 0;
3272}
3273
3274static void
3275bnx2_disable_nvram_write(struct bnx2 *bp)
3276{
3277 u32 val;
3278
3279 val = REG_RD(bp, BNX2_MISC_CFG);
3280 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3281}
3282
3283
3284static void
3285bnx2_enable_nvram_access(struct bnx2 *bp)
3286{
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003292 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3293}
3294
3295static void
3296bnx2_disable_nvram_access(struct bnx2 *bp)
3297{
3298 u32 val;
3299
3300 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3301 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003302 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003303 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3304 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3305}
3306
3307static int
3308bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3309{
3310 u32 cmd;
3311 int j;
3312
3313 if (bp->flash_info->buffered)
3314 /* Buffered flash, no erase needed */
3315 return 0;
3316
3317 /* Build an erase command */
3318 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3319 BNX2_NVM_COMMAND_DOIT;
3320
3321 /* Need to clear DONE bit separately. */
3322 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3323
3324 /* Address of the NVRAM to read from. */
3325 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3326
3327 /* Issue an erase command. */
3328 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3329
3330 /* Wait for completion. */
3331 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3332 u32 val;
3333
3334 udelay(5);
3335
3336 val = REG_RD(bp, BNX2_NVM_COMMAND);
3337 if (val & BNX2_NVM_COMMAND_DONE)
3338 break;
3339 }
3340
3341 if (j >= NVRAM_TIMEOUT_COUNT)
3342 return -EBUSY;
3343
3344 return 0;
3345}
3346
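/* Single-dword NVRAM read.  For buffered flash the linear offset is first
 * translated into a page-addressed offset; the translation below is
 * equivalent to (illustrative only):
 *
 *	page = offset / page_size;
 *	phys = (page << page_bits) + (offset % page_size);
 *
 * The command is then issued through NVM_COMMAND and the result picked up
 * from NVM_READ once DONE is set, converted from big-endian register order.
 */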
3347static int
3348bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3349{
3350 u32 cmd;
3351 int j;
3352
3353 /* Build the command word. */
3354 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3355
3356	/* Translate the offset for buffered flash. */
3357 if (bp->flash_info->buffered) {
3358 offset = ((offset / bp->flash_info->page_size) <<
3359 bp->flash_info->page_bits) +
3360 (offset % bp->flash_info->page_size);
3361 }
3362
3363 /* Need to clear DONE bit separately. */
3364 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3365
3366 /* Address of the NVRAM to read from. */
3367 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3368
3369 /* Issue a read command. */
3370 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3371
3372 /* Wait for completion. */
3373 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3374 u32 val;
3375
3376 udelay(5);
3377
3378 val = REG_RD(bp, BNX2_NVM_COMMAND);
3379 if (val & BNX2_NVM_COMMAND_DONE) {
3380 val = REG_RD(bp, BNX2_NVM_READ);
3381
3382 val = be32_to_cpu(val);
3383 memcpy(ret_val, &val, 4);
3384 break;
3385 }
3386 }
3387 if (j >= NVRAM_TIMEOUT_COUNT)
3388 return -EBUSY;
3389
3390 return 0;
3391}
3392
3393
3394static int
3395bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3396{
3397 u32 cmd, val32;
3398 int j;
3399
3400 /* Build the command word. */
3401 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3402
3403 /* Calculate an offset of a buffered flash. */
3404 if (bp->flash_info->buffered) {
3405 offset = ((offset / bp->flash_info->page_size) <<
3406 bp->flash_info->page_bits) +
3407 (offset % bp->flash_info->page_size);
3408 }
3409
3410 /* Need to clear DONE bit separately. */
3411 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3412
3413 memcpy(&val32, val, 4);
3414 val32 = cpu_to_be32(val32);
3415
3416 /* Write the data. */
3417 REG_WR(bp, BNX2_NVM_WRITE, val32);
3418
3419 /* Address of the NVRAM to write to. */
3420 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3421
3422 /* Issue the write command. */
3423 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3424
3425 /* Wait for completion. */
3426 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427 udelay(5);
3428
3429 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3430 break;
3431 }
3432 if (j >= NVRAM_TIMEOUT_COUNT)
3433 return -EBUSY;
3434
3435 return 0;
3436}
3437
3438static int
3439bnx2_init_nvram(struct bnx2 *bp)
3440{
3441 u32 val;
3442 int j, entry_count, rc;
3443 struct flash_spec *flash;
3444
3445 /* Determine the selected interface. */
3446 val = REG_RD(bp, BNX2_NVM_CFG1);
3447
3448 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3449
3450 rc = 0;
3451 if (val & 0x40000000) {
3452
3453 /* Flash interface has been reconfigured */
3454 for (j = 0, flash = &flash_table[0]; j < entry_count;
Michael Chan37137702005-11-04 08:49:17 -08003455 j++, flash++) {
3456 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3457 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003458 bp->flash_info = flash;
3459 break;
3460 }
3461 }
3462 }
3463 else {
Michael Chan37137702005-11-04 08:49:17 -08003464 u32 mask;
Michael Chanb6016b72005-05-26 13:03:09 -07003465		/* Not yet reconfigured */
3466
Michael Chan37137702005-11-04 08:49:17 -08003467 if (val & (1 << 23))
3468 mask = FLASH_BACKUP_STRAP_MASK;
3469 else
3470 mask = FLASH_STRAP_MASK;
3471
Michael Chanb6016b72005-05-26 13:03:09 -07003472 for (j = 0, flash = &flash_table[0]; j < entry_count;
3473 j++, flash++) {
3474
Michael Chan37137702005-11-04 08:49:17 -08003475 if ((val & mask) == (flash->strapping & mask)) {
Michael Chanb6016b72005-05-26 13:03:09 -07003476 bp->flash_info = flash;
3477
3478 /* Request access to the flash interface. */
3479 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3480 return rc;
3481
3482 /* Enable access to flash interface */
3483 bnx2_enable_nvram_access(bp);
3484
3485 /* Reconfigure the flash interface */
3486 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3487 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3488 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3489 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3490
3491 /* Disable access to flash interface */
3492 bnx2_disable_nvram_access(bp);
3493 bnx2_release_nvram_lock(bp);
3494
3495 break;
3496 }
3497 }
3498 } /* if (val & 0x40000000) */
3499
3500 if (j == entry_count) {
3501 bp->flash_info = NULL;
John W. Linville2f23c522005-11-10 12:57:33 -08003502 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
Michael Chan1122db72006-01-23 16:11:42 -08003503 return -ENODEV;
Michael Chanb6016b72005-05-26 13:03:09 -07003504 }
3505
Michael Chan1122db72006-01-23 16:11:42 -08003506 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3507 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3508 if (val)
3509 bp->flash_size = val;
3510 else
3511 bp->flash_size = bp->flash_info->total_size;
3512
Michael Chanb6016b72005-05-26 13:03:09 -07003513 return rc;
3514}
3515
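/* Arbitrary-length NVRAM read.  The flash is only addressable in dwords,
 * so an unaligned start is handled by reading the covering dword and
 * copying just the requested tail, and a ragged end is handled by rounding
 * the length up and dropping the surplus bytes from the final copy.  The
 * FIRST/LAST command flags bracket the whole burst.
 */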
3516static int
3517bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3518 int buf_size)
3519{
3520 int rc = 0;
3521 u32 cmd_flags, offset32, len32, extra;
3522
3523 if (buf_size == 0)
3524 return 0;
3525
3526 /* Request access to the flash interface. */
3527 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3528 return rc;
3529
3530 /* Enable access to flash interface */
3531 bnx2_enable_nvram_access(bp);
3532
3533 len32 = buf_size;
3534 offset32 = offset;
3535 extra = 0;
3536
3537 cmd_flags = 0;
3538
3539 if (offset32 & 3) {
3540 u8 buf[4];
3541 u32 pre_len;
3542
3543 offset32 &= ~3;
3544 pre_len = 4 - (offset & 3);
3545
3546 if (pre_len >= len32) {
3547 pre_len = len32;
3548 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3549 BNX2_NVM_COMMAND_LAST;
3550 }
3551 else {
3552 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3553 }
3554
3555 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3556
3557 if (rc)
3558 return rc;
3559
3560 memcpy(ret_buf, buf + (offset & 3), pre_len);
3561
3562 offset32 += 4;
3563 ret_buf += pre_len;
3564 len32 -= pre_len;
3565 }
3566 if (len32 & 3) {
3567 extra = 4 - (len32 & 3);
3568 len32 = (len32 + 4) & ~3;
3569 }
3570
3571 if (len32 == 4) {
3572 u8 buf[4];
3573
3574 if (cmd_flags)
3575 cmd_flags = BNX2_NVM_COMMAND_LAST;
3576 else
3577 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3578 BNX2_NVM_COMMAND_LAST;
3579
3580 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3581
3582 memcpy(ret_buf, buf, 4 - extra);
3583 }
3584 else if (len32 > 0) {
3585 u8 buf[4];
3586
3587 /* Read the first word. */
3588 if (cmd_flags)
3589 cmd_flags = 0;
3590 else
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3592
3593 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3594
3595 /* Advance to the next dword. */
3596 offset32 += 4;
3597 ret_buf += 4;
3598 len32 -= 4;
3599
3600 while (len32 > 4 && rc == 0) {
3601 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3602
3603 /* Advance to the next dword. */
3604 offset32 += 4;
3605 ret_buf += 4;
3606 len32 -= 4;
3607 }
3608
3609 if (rc)
3610 return rc;
3611
3612 cmd_flags = BNX2_NVM_COMMAND_LAST;
3613 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3614
3615 memcpy(ret_buf, buf, 4 - extra);
3616 }
3617
3618 /* Disable access to flash interface */
3619 bnx2_disable_nvram_access(bp);
3620
3621 bnx2_release_nvram_lock(bp);
3622
3623 return rc;
3624}
3625
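/* Arbitrary-length NVRAM write, implemented as read-modify-write.  Any
 * unaligned leading/trailing bytes are first read back into start[]/end[]
 * and merged with the caller's data in align_buf.  For non-buffered flash
 * each affected page is read into flash_buffer, erased, and rewritten with
 * the new bytes spliced in; buffered flash is programmed in place.
 */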
3626static int
3627bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3628 int buf_size)
3629{
3630 u32 written, offset32, len32;
Michael Chane6be7632007-01-08 19:56:13 -08003631 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07003632 int rc = 0;
3633 int align_start, align_end;
3634
3635 buf = data_buf;
3636 offset32 = offset;
3637 len32 = buf_size;
3638 align_start = align_end = 0;
3639
3640 if ((align_start = (offset32 & 3))) {
3641 offset32 &= ~3;
Michael Chanc8738792007-03-30 14:53:06 -07003642 len32 += align_start;
3643 if (len32 < 4)
3644 len32 = 4;
Michael Chanb6016b72005-05-26 13:03:09 -07003645 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3646 return rc;
3647 }
3648
3649 if (len32 & 3) {
Michael Chanc8738792007-03-30 14:53:06 -07003650 align_end = 4 - (len32 & 3);
3651 len32 += align_end;
3652 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3653 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003654 }
3655
3656 if (align_start || align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003657 align_buf = kmalloc(len32, GFP_KERNEL);
3658 if (align_buf == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003659 return -ENOMEM;
3660 if (align_start) {
Michael Chane6be7632007-01-08 19:56:13 -08003661 memcpy(align_buf, start, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003662 }
3663 if (align_end) {
Michael Chane6be7632007-01-08 19:56:13 -08003664 memcpy(align_buf + len32 - 4, end, 4);
Michael Chanb6016b72005-05-26 13:03:09 -07003665 }
Michael Chane6be7632007-01-08 19:56:13 -08003666 memcpy(align_buf + align_start, data_buf, buf_size);
3667 buf = align_buf;
Michael Chanb6016b72005-05-26 13:03:09 -07003668 }
3669
Michael Chanae181bc2006-05-22 16:39:20 -07003670 if (bp->flash_info->buffered == 0) {
3671 flash_buffer = kmalloc(264, GFP_KERNEL);
3672 if (flash_buffer == NULL) {
3673 rc = -ENOMEM;
3674 goto nvram_write_end;
3675 }
3676 }
3677
Michael Chanb6016b72005-05-26 13:03:09 -07003678 written = 0;
3679 while ((written < len32) && (rc == 0)) {
3680 u32 page_start, page_end, data_start, data_end;
3681 u32 addr, cmd_flags;
3682 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07003683
3684 /* Find the page_start addr */
3685 page_start = offset32 + written;
3686 page_start -= (page_start % bp->flash_info->page_size);
3687 /* Find the page_end addr */
3688 page_end = page_start + bp->flash_info->page_size;
3689 /* Find the data_start addr */
3690 data_start = (written == 0) ? offset32 : page_start;
3691 /* Find the data_end addr */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003692 data_end = (page_end > offset32 + len32) ?
Michael Chanb6016b72005-05-26 13:03:09 -07003693 (offset32 + len32) : page_end;
3694
3695 /* Request access to the flash interface. */
3696 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3697 goto nvram_write_end;
3698
3699 /* Enable access to flash interface */
3700 bnx2_enable_nvram_access(bp);
3701
3702 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3703 if (bp->flash_info->buffered == 0) {
3704 int j;
3705
3706 /* Read the whole page into the buffer
3707 * (non-buffer flash only) */
3708 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3709 if (j == (bp->flash_info->page_size - 4)) {
3710 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3711 }
3712 rc = bnx2_nvram_read_dword(bp,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003713 page_start + j,
3714 &flash_buffer[j],
Michael Chanb6016b72005-05-26 13:03:09 -07003715 cmd_flags);
3716
3717 if (rc)
3718 goto nvram_write_end;
3719
3720 cmd_flags = 0;
3721 }
3722 }
3723
3724 /* Enable writes to flash interface (unlock write-protect) */
3725 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3726 goto nvram_write_end;
3727
Michael Chanb6016b72005-05-26 13:03:09 -07003728 /* Loop to write back the buffer data from page_start to
3729 * data_start */
3730 i = 0;
3731 if (bp->flash_info->buffered == 0) {
Michael Chanc8738792007-03-30 14:53:06 -07003732 /* Erase the page */
3733 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3734 goto nvram_write_end;
3735
3736 /* Re-enable the write again for the actual write */
3737 bnx2_enable_nvram_write(bp);
3738
Michael Chanb6016b72005-05-26 13:03:09 -07003739 for (addr = page_start; addr < data_start;
3740 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003741
Michael Chanb6016b72005-05-26 13:03:09 -07003742 rc = bnx2_nvram_write_dword(bp, addr,
3743 &flash_buffer[i], cmd_flags);
3744
3745 if (rc != 0)
3746 goto nvram_write_end;
3747
3748 cmd_flags = 0;
3749 }
3750 }
3751
3752 /* Loop to write the new data from data_start to data_end */
Michael Chanbae25762006-05-22 16:38:38 -07003753 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
Michael Chanb6016b72005-05-26 13:03:09 -07003754 if ((addr == page_end - 4) ||
3755 ((bp->flash_info->buffered) &&
3756 (addr == data_end - 4))) {
3757
3758 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3759 }
3760 rc = bnx2_nvram_write_dword(bp, addr, buf,
3761 cmd_flags);
3762
3763 if (rc != 0)
3764 goto nvram_write_end;
3765
3766 cmd_flags = 0;
3767 buf += 4;
3768 }
3769
3770 /* Loop to write back the buffer data from data_end
3771 * to page_end */
3772 if (bp->flash_info->buffered == 0) {
3773 for (addr = data_end; addr < page_end;
3774 addr += 4, i += 4) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003775
Michael Chanb6016b72005-05-26 13:03:09 -07003776 if (addr == page_end-4) {
3777 cmd_flags = BNX2_NVM_COMMAND_LAST;
3778 }
3779 rc = bnx2_nvram_write_dword(bp, addr,
3780 &flash_buffer[i], cmd_flags);
3781
3782 if (rc != 0)
3783 goto nvram_write_end;
3784
3785 cmd_flags = 0;
3786 }
3787 }
3788
3789 /* Disable writes to flash interface (lock write-protect) */
3790 bnx2_disable_nvram_write(bp);
3791
3792 /* Disable access to flash interface */
3793 bnx2_disable_nvram_access(bp);
3794 bnx2_release_nvram_lock(bp);
3795
3796 /* Increment written */
3797 written += data_end - data_start;
3798 }
3799
3800nvram_write_end:
Michael Chane6be7632007-01-08 19:56:13 -08003801 kfree(flash_buffer);
3802 kfree(align_buf);
Michael Chanb6016b72005-05-26 13:03:09 -07003803 return rc;
3804}
3805
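/* Remote PHY probe: on SerDes ports the firmware capability mailbox is
 * checked for the expected signature; if the firmware advertises remote
 * PHY control, the driver acknowledges it (when the interface is running),
 * sets REMOTE_PHY_CAP_FLAG and records fibre vs. twisted-pair from the
 * shared-memory link status word.
 */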
Michael Chan0d8a6572007-07-07 22:49:43 -07003806static void
3807bnx2_init_remote_phy(struct bnx2 *bp)
3808{
3809 u32 val;
3810
3811 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3812 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3813 return;
3814
3815 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3816 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3817 return;
3818
3819 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3820 if (netif_running(bp->dev)) {
3821 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3822 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3823 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3824 val);
3825 }
3826 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3827
3828 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3829 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3830 bp->phy_port = PORT_FIBRE;
3831 else
3832 bp->phy_port = PORT_TP;
3833 }
3834}
3835
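/* Chip reset sequence: quiesce DMA and host coalescing, handshake with the
 * firmware (DRV_MSG_DATA_WAIT0), deposit the soft-reset signature, then
 * reset the core -- 5709 parts via MISC_COMMAND_SW_RESET, older parts via
 * the PCICFG core-reset request which is then polled for completion.
 * Byte swapping is verified through PCI_SWAP_DIAG0 before waiting for the
 * firmware to finish its own initialization (DRV_MSG_DATA_WAIT1).
 */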
Michael Chanb6016b72005-05-26 13:03:09 -07003836static int
3837bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3838{
3839 u32 val;
3840 int i, rc = 0;
3841
3842 /* Wait for the current PCI transaction to complete before
3843 * issuing a reset. */
3844 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3845 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3846 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3847 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3848 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3849 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3850 udelay(5);
3851
Michael Chanb090ae22006-01-23 16:07:10 -08003852 /* Wait for the firmware to tell us it is ok to issue a reset. */
3853 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3854
Michael Chanb6016b72005-05-26 13:03:09 -07003855 /* Deposit a driver reset signature so the firmware knows that
3856 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003857 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003858 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3859
Michael Chanb6016b72005-05-26 13:03:09 -07003860	/* Do a dummy read to force the chip to complete all current transactions
3861 * before we issue a reset. */
3862 val = REG_RD(bp, BNX2_MISC_ID);
3863
Michael Chan234754d2006-11-19 14:11:41 -08003864 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3865 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3866 REG_RD(bp, BNX2_MISC_COMMAND);
3867 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003868
Michael Chan234754d2006-11-19 14:11:41 -08003869 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3870 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003871
Michael Chan234754d2006-11-19 14:11:41 -08003872 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003873
Michael Chan234754d2006-11-19 14:11:41 -08003874 } else {
3875 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3876 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3877 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3878
3879 /* Chip reset. */
3880 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3881
3882 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3883 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3884 current->state = TASK_UNINTERRUPTIBLE;
3885 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003886 }
Michael Chanb6016b72005-05-26 13:03:09 -07003887
Michael Chan234754d2006-11-19 14:11:41 -08003888		/* Reset takes approximately 30 usec */
3889 for (i = 0; i < 10; i++) {
3890 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3891 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3892 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3893 break;
3894 udelay(10);
3895 }
3896
3897 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3898 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3899 printk(KERN_ERR PFX "Chip reset did not complete\n");
3900 return -EBUSY;
3901 }
Michael Chanb6016b72005-05-26 13:03:09 -07003902 }
3903
3904 /* Make sure byte swapping is properly configured. */
3905 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3906 if (val != 0x01020304) {
3907 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3908 return -ENODEV;
3909 }
3910
Michael Chanb6016b72005-05-26 13:03:09 -07003911 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003912 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3913 if (rc)
3914 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003915
Michael Chan0d8a6572007-07-07 22:49:43 -07003916 spin_lock_bh(&bp->phy_lock);
3917 bnx2_init_remote_phy(bp);
3918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3919 bnx2_set_default_remote_link(bp);
3920 spin_unlock_bh(&bp->phy_lock);
3921
Michael Chanb6016b72005-05-26 13:03:09 -07003922 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3923		/* Adjust the voltage regulator to two steps lower.  The default
3924 * of this register is 0x0000000e. */
3925 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3926
3927 /* Remove bad rbuf memory from the free pool. */
3928 rc = bnx2_alloc_bad_rbuf(bp);
3929 }
3930
3931 return rc;
3932}
3933
3934static int
3935bnx2_init_chip(struct bnx2 *bp)
3936{
3937 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003938 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003939
3940 /* Make sure the interrupt is not active. */
3941 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3942
3943 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3944 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3945#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003946 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003947#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003948 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003949 DMA_READ_CHANS << 12 |
3950 DMA_WRITE_CHANS << 16;
3951
3952 val |= (0x2 << 20) | (1 << 11);
3953
Michael Chandda1e392006-01-23 16:08:14 -08003954 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003955 val |= (1 << 23);
3956
3957 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3958 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3959 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3960
3961 REG_WR(bp, BNX2_DMA_CONFIG, val);
3962
3963 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3964 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3965 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3966 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3967 }
3968
3969 if (bp->flags & PCIX_FLAG) {
3970 u16 val16;
3971
3972 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3973 &val16);
3974 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3975 val16 & ~PCI_X_CMD_ERO);
3976 }
3977
3978 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3979 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3980 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3981 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3982
3983 /* Initialize context mapping and zero out the quick contexts. The
3984 * context block must have already been enabled. */
Michael Chan641bdcd2007-06-04 21:22:24 -07003985 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3986 rc = bnx2_init_5709_context(bp);
3987 if (rc)
3988 return rc;
3989 } else
Michael Chan59b47d82006-11-19 14:10:45 -08003990 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003991
Michael Chanfba9fe92006-06-12 22:21:25 -07003992 if ((rc = bnx2_init_cpus(bp)) != 0)
3993 return rc;
3994
Michael Chanb6016b72005-05-26 13:03:09 -07003995 bnx2_init_nvram(bp);
3996
3997 bnx2_set_mac_addr(bp);
3998
3999 val = REG_RD(bp, BNX2_MQ_CONFIG);
4000 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4001 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07004002 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4003 val |= BNX2_MQ_CONFIG_HALT_DIS;
4004
Michael Chanb6016b72005-05-26 13:03:09 -07004005 REG_WR(bp, BNX2_MQ_CONFIG, val);
4006
4007 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4008 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4009 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4010
4011 val = (BCM_PAGE_BITS - 8) << 24;
4012 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4013
4014 /* Configure page size. */
4015 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4016 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4017 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4018 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4019
4020 val = bp->mac_addr[0] +
4021 (bp->mac_addr[1] << 8) +
4022 (bp->mac_addr[2] << 16) +
4023 bp->mac_addr[3] +
4024 (bp->mac_addr[4] << 8) +
4025 (bp->mac_addr[5] << 16);
4026 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4027
4028 /* Program the MTU. Also include 4 bytes for CRC32. */
4029 val = bp->dev->mtu + ETH_HLEN + 4;
4030 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4031 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4032 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4033
4034 bp->last_status_idx = 0;
4035 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4036
4037 /* Set up how to generate a link change interrupt. */
4038 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4039
4040 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4041 (u64) bp->status_blk_mapping & 0xffffffff);
4042 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4043
4044 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4045 (u64) bp->stats_blk_mapping & 0xffffffff);
4046 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4047 (u64) bp->stats_blk_mapping >> 32);
4048
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004049 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07004050 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4051
4052 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4053 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4054
4055 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4056 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4057
4058 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4059
4060 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4061
4062 REG_WR(bp, BNX2_HC_COM_TICKS,
4063 (bp->com_ticks_int << 16) | bp->com_ticks);
4064
4065 REG_WR(bp, BNX2_HC_CMD_TICKS,
4066 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4067
Michael Chan02537b062007-06-04 21:24:07 -07004068 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4069 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4070 else
4071 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
Michael Chanb6016b72005-05-26 13:03:09 -07004072 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4073
4074 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07004075 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004076 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07004077 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4078 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07004079 }
4080
Michael Chan8e6a72c2007-05-03 13:24:48 -07004081 if (bp->flags & ONE_SHOT_MSI_FLAG)
4082 val |= BNX2_HC_CONFIG_ONE_SHOT;
4083
4084 REG_WR(bp, BNX2_HC_CONFIG, val);
4085
Michael Chanb6016b72005-05-26 13:03:09 -07004086 /* Clear internal stats counters. */
4087 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4088
Michael Chanda3e4fb2007-05-03 13:24:23 -07004089 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07004090
Michael Chane29054f2006-01-23 16:06:06 -08004091 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4092 BNX2_PORT_FEATURE_ASF_ENABLED)
4093 bp->flags |= ASF_ENABLE_FLAG;
4094
Michael Chanb6016b72005-05-26 13:03:09 -07004095 /* Initialize the receive filter. */
4096 bnx2_set_rx_mode(bp->dev);
4097
Michael Chan0aa38df2007-06-04 21:23:06 -07004098 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4099 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4100 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4101 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4102 }
Michael Chanb090ae22006-01-23 16:07:10 -08004103 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4104 0);
Michael Chanb6016b72005-05-26 13:03:09 -07004105
4106 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
4107 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4108
4109 udelay(20);
4110
Michael Chanbf5295b2006-03-23 01:11:56 -08004111 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4112
Michael Chanb090ae22006-01-23 16:07:10 -08004113 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07004114}
4115
Michael Chan59b47d82006-11-19 14:10:45 -08004116static void
4117bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4118{
4119 u32 val, offset0, offset1, offset2, offset3;
4120
4121 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4122 offset0 = BNX2_L2CTX_TYPE_XI;
4123 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4124 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4125 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4126 } else {
4127 offset0 = BNX2_L2CTX_TYPE;
4128 offset1 = BNX2_L2CTX_CMD_TYPE;
4129 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4130 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4131 }
4132 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4133 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4134
4135 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4136 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4137
4138 val = (u64) bp->tx_desc_mapping >> 32;
4139 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4140
4141 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4142 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4143}
Michael Chanb6016b72005-05-26 13:03:09 -07004144
4145static void
4146bnx2_init_tx_ring(struct bnx2 *bp)
4147{
4148 struct tx_bd *txbd;
Michael Chan59b47d82006-11-19 14:10:45 -08004149 u32 cid;
Michael Chanb6016b72005-05-26 13:03:09 -07004150
Michael Chan2f8af122006-08-15 01:39:10 -07004151 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4152
Michael Chanb6016b72005-05-26 13:03:09 -07004153 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004154
Michael Chanb6016b72005-05-26 13:03:09 -07004155 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4156 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4157
4158 bp->tx_prod = 0;
4159 bp->tx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004160 bp->hw_tx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004161 bp->tx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004162
Michael Chan59b47d82006-11-19 14:10:45 -08004163 cid = TX_CID;
4164 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4165 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
Michael Chanb6016b72005-05-26 13:03:09 -07004166
Michael Chan59b47d82006-11-19 14:10:45 -08004167 bnx2_init_tx_context(bp, cid);
Michael Chanb6016b72005-05-26 13:03:09 -07004168}
4169
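/* RX ring setup: the receive ring can span several pages; the last BD of
 * each page carries the DMA address of the next page so the chip chains
 * through them, and only page 0's address is programmed into the RX
 * context.  The ring is then pre-filled with up to rx_ring_size skbs and
 * the producer index/sequence handed to the hardware via the mailbox.
 */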
4170static void
4171bnx2_init_rx_ring(struct bnx2 *bp)
4172{
4173 struct rx_bd *rxbd;
4174 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004175 u16 prod, ring_prod;
Michael Chanb6016b72005-05-26 13:03:09 -07004176 u32 val;
4177
4178 /* 8 for CRC and VLAN */
4179 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
Michael Chan59b47d82006-11-19 14:10:45 -08004180 /* hw alignment */
4181 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
Michael Chanb6016b72005-05-26 13:03:09 -07004182
4183 ring_prod = prod = bp->rx_prod = 0;
4184 bp->rx_cons = 0;
Michael Chanf4e418f2005-11-04 08:53:48 -08004185 bp->hw_rx_cons = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004186 bp->rx_prod_bseq = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004187
Michael Chan13daffa2006-03-20 17:49:20 -08004188 for (i = 0; i < bp->rx_max_ring; i++) {
4189 int j;
Michael Chanb6016b72005-05-26 13:03:09 -07004190
Michael Chan13daffa2006-03-20 17:49:20 -08004191 rxbd = &bp->rx_desc_ring[i][0];
4192 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4193 rxbd->rx_bd_len = bp->rx_buf_use_size;
4194 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4195 }
4196 if (i == (bp->rx_max_ring - 1))
4197 j = 0;
4198 else
4199 j = i + 1;
4200 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4201 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4202 0xffffffff;
4203 }
Michael Chanb6016b72005-05-26 13:03:09 -07004204
4205 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4206 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4207 val |= 0x02 << 8;
4208 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4209
Michael Chan13daffa2006-03-20 17:49:20 -08004210 val = (u64) bp->rx_desc_mapping[0] >> 32;
Michael Chanb6016b72005-05-26 13:03:09 -07004211 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4212
Michael Chan13daffa2006-03-20 17:49:20 -08004213 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
Michael Chanb6016b72005-05-26 13:03:09 -07004214 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4215
Michael Chan236b6392006-03-20 17:49:02 -08004216 for (i = 0; i < bp->rx_ring_size; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004217 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4218 break;
4219 }
4220 prod = NEXT_RX_BD(prod);
4221 ring_prod = RX_RING_IDX(prod);
4222 }
4223 bp->rx_prod = prod;
4224
4225 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4226
4227 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4228}
4229
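/* Derive the number of RX pages needed for the requested ring size and
 * round it up to the next power of two (capped at MAX_RX_RINGS);
 * rx_max_ring_idx then records the largest valid index in the ring.
 */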
4230static void
Michael Chan13daffa2006-03-20 17:49:20 -08004231bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4232{
4233 u32 num_rings, max;
4234
4235 bp->rx_ring_size = size;
4236 num_rings = 1;
4237 while (size > MAX_RX_DESC_CNT) {
4238 size -= MAX_RX_DESC_CNT;
4239 num_rings++;
4240 }
4241 /* round to next power of 2 */
4242 max = MAX_RX_RINGS;
4243 while ((max & num_rings) == 0)
4244 max >>= 1;
4245
4246 if (num_rings != max)
4247 max <<= 1;
4248
4249 bp->rx_max_ring = max;
4250 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4251}
4252
4253static void
Michael Chanb6016b72005-05-26 13:03:09 -07004254bnx2_free_tx_skbs(struct bnx2 *bp)
4255{
4256 int i;
4257
4258 if (bp->tx_buf_ring == NULL)
4259 return;
4260
4261 for (i = 0; i < TX_DESC_CNT; ) {
4262 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4263 struct sk_buff *skb = tx_buf->skb;
4264 int j, last;
4265
4266 if (skb == NULL) {
4267 i++;
4268 continue;
4269 }
4270
4271 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4272 skb_headlen(skb), PCI_DMA_TODEVICE);
4273
4274 tx_buf->skb = NULL;
4275
4276 last = skb_shinfo(skb)->nr_frags;
4277 for (j = 0; j < last; j++) {
4278 tx_buf = &bp->tx_buf_ring[i + j + 1];
4279 pci_unmap_page(bp->pdev,
4280 pci_unmap_addr(tx_buf, mapping),
4281 skb_shinfo(skb)->frags[j].size,
4282 PCI_DMA_TODEVICE);
4283 }
Michael Chan745720e2006-06-29 12:37:41 -07004284 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004285 i += j + 1;
4286 }
4287
4288}
4289
4290static void
4291bnx2_free_rx_skbs(struct bnx2 *bp)
4292{
4293 int i;
4294
4295 if (bp->rx_buf_ring == NULL)
4296 return;
4297
Michael Chan13daffa2006-03-20 17:49:20 -08004298 for (i = 0; i < bp->rx_max_ring_idx; i++) {
Michael Chanb6016b72005-05-26 13:03:09 -07004299 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4300 struct sk_buff *skb = rx_buf->skb;
4301
Michael Chan05d0f1c2005-11-04 08:53:48 -08004302 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004303 continue;
4304
4305 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4306 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4307
4308 rx_buf->skb = NULL;
4309
Michael Chan745720e2006-06-29 12:37:41 -07004310 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004311 }
4312}
4313
4314static void
4315bnx2_free_skbs(struct bnx2 *bp)
4316{
4317 bnx2_free_tx_skbs(bp);
4318 bnx2_free_rx_skbs(bp);
4319}
4320
4321static int
4322bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4323{
4324 int rc;
4325
4326 rc = bnx2_reset_chip(bp, reset_code);
4327 bnx2_free_skbs(bp);
4328 if (rc)
4329 return rc;
4330
Michael Chanfba9fe92006-06-12 22:21:25 -07004331 if ((rc = bnx2_init_chip(bp)) != 0)
4332 return rc;
4333
Michael Chanb6016b72005-05-26 13:03:09 -07004334 bnx2_init_tx_ring(bp);
4335 bnx2_init_rx_ring(bp);
4336 return 0;
4337}
4338
4339static int
4340bnx2_init_nic(struct bnx2 *bp)
4341{
4342 int rc;
4343
4344 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4345 return rc;
4346
Michael Chan80be4432006-11-19 14:07:28 -08004347 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004348 bnx2_init_phy(bp);
4349 bnx2_set_link(bp);
Michael Chan0d8a6572007-07-07 22:49:43 -07004350 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004351 return 0;
4352}
4353
4354static int
4355bnx2_test_registers(struct bnx2 *bp)
4356{
4357 int ret;
Michael Chan5bae30c2007-05-03 13:18:46 -07004358 int i, is_5709;
Arjan van de Venf71e1302006-03-03 21:33:57 -05004359 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07004360 u16 offset;
4361 u16 flags;
Michael Chan5bae30c2007-05-03 13:18:46 -07004362#define BNX2_FL_NOT_5709 1
Michael Chanb6016b72005-05-26 13:03:09 -07004363 u32 rw_mask;
4364 u32 ro_mask;
4365 } reg_tbl[] = {
4366 { 0x006c, 0, 0x00000000, 0x0000003f },
4367 { 0x0090, 0, 0xffffffff, 0x00000000 },
4368 { 0x0094, 0, 0x00000000, 0x00000000 },
4369
Michael Chan5bae30c2007-05-03 13:18:46 -07004370 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4371 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4372 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4373 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4374 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4375 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4376 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4377 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4378 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
Michael Chanb6016b72005-05-26 13:03:09 -07004379
Michael Chan5bae30c2007-05-03 13:18:46 -07004380 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4382 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4383 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4384 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4385 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
Michael Chanb6016b72005-05-26 13:03:09 -07004386
Michael Chan5bae30c2007-05-03 13:18:46 -07004387 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4388 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4389 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004390
4391 { 0x1000, 0, 0x00000000, 0x00000001 },
4392 { 0x1004, 0, 0x00000000, 0x000f0001 },
Michael Chanb6016b72005-05-26 13:03:09 -07004393
4394 { 0x1408, 0, 0x01c00800, 0x00000000 },
4395 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4396 { 0x14a8, 0, 0x00000000, 0x000001ff },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004397 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004398 { 0x14b0, 0, 0x00000002, 0x00000001 },
4399 { 0x14b8, 0, 0x00000000, 0x00000000 },
4400 { 0x14c0, 0, 0x00000000, 0x00000009 },
4401 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4402 { 0x14cc, 0, 0x00000000, 0x00000001 },
4403 { 0x14d0, 0, 0xffffffff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004404
4405 { 0x1800, 0, 0x00000000, 0x00000001 },
4406 { 0x1804, 0, 0x00000000, 0x00000003 },
Michael Chanb6016b72005-05-26 13:03:09 -07004407
4408 { 0x2800, 0, 0x00000000, 0x00000001 },
4409 { 0x2804, 0, 0x00000000, 0x00003f01 },
4410 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4411 { 0x2810, 0, 0xffff0000, 0x00000000 },
4412 { 0x2814, 0, 0xffff0000, 0x00000000 },
4413 { 0x2818, 0, 0xffff0000, 0x00000000 },
4414 { 0x281c, 0, 0xffff0000, 0x00000000 },
4415 { 0x2834, 0, 0xffffffff, 0x00000000 },
4416 { 0x2840, 0, 0x00000000, 0xffffffff },
4417 { 0x2844, 0, 0x00000000, 0xffffffff },
4418 { 0x2848, 0, 0xffffffff, 0x00000000 },
4419 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4420
4421 { 0x2c00, 0, 0x00000000, 0x00000011 },
4422 { 0x2c04, 0, 0x00000000, 0x00030007 },
4423
Michael Chanb6016b72005-05-26 13:03:09 -07004424 { 0x3c00, 0, 0x00000000, 0x00000001 },
4425 { 0x3c04, 0, 0x00000000, 0x00070000 },
4426 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4427 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4428 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4429 { 0x3c14, 0, 0x00000000, 0xffffffff },
4430 { 0x3c18, 0, 0x00000000, 0xffffffff },
4431 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4432 { 0x3c20, 0, 0xffffff00, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004433
4434 { 0x5004, 0, 0x00000000, 0x0000007f },
4435 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004436
Michael Chanb6016b72005-05-26 13:03:09 -07004437 { 0x5c00, 0, 0x00000000, 0x00000001 },
4438 { 0x5c04, 0, 0x00000000, 0x0003000f },
4439 { 0x5c08, 0, 0x00000003, 0x00000000 },
4440 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4441 { 0x5c10, 0, 0x00000000, 0xffffffff },
4442 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4443 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4444 { 0x5c88, 0, 0x00000000, 0x00077373 },
4445 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4446
4447 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4448 { 0x680c, 0, 0xffffffff, 0x00000000 },
4449 { 0x6810, 0, 0xffffffff, 0x00000000 },
4450 { 0x6814, 0, 0xffffffff, 0x00000000 },
4451 { 0x6818, 0, 0xffffffff, 0x00000000 },
4452 { 0x681c, 0, 0xffffffff, 0x00000000 },
4453 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4454 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4455 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4456 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4457 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4458 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4459 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4461 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4462 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4463 { 0x684c, 0, 0xffffffff, 0x00000000 },
4464 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4465 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4466 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4467 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4469 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4470
4471 { 0xffff, 0, 0x00000000, 0x00000000 },
4472 };
4473
4474 ret = 0;
Michael Chan5bae30c2007-05-03 13:18:46 -07004475 is_5709 = 0;
4476 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4477 is_5709 = 1;
4478
Michael Chanb6016b72005-05-26 13:03:09 -07004479 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4480 u32 offset, rw_mask, ro_mask, save_val, val;
Michael Chan5bae30c2007-05-03 13:18:46 -07004481 u16 flags = reg_tbl[i].flags;
4482
4483 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4484 continue;
Michael Chanb6016b72005-05-26 13:03:09 -07004485
4486 offset = (u32) reg_tbl[i].offset;
4487 rw_mask = reg_tbl[i].rw_mask;
4488 ro_mask = reg_tbl[i].ro_mask;
4489
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004490 save_val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004491
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004492 writel(0, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004493
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004494 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004495 if ((val & rw_mask) != 0) {
4496 goto reg_test_err;
4497 }
4498
4499 if ((val & ro_mask) != (save_val & ro_mask)) {
4500 goto reg_test_err;
4501 }
4502
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004503 writel(0xffffffff, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004504
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004505 val = readl(bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004506 if ((val & rw_mask) != rw_mask) {
4507 goto reg_test_err;
4508 }
4509
4510 if ((val & ro_mask) != (save_val & ro_mask)) {
4511 goto reg_test_err;
4512 }
4513
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004514 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004515 continue;
4516
4517reg_test_err:
Peter Hagervall14ab9b82005-08-10 14:18:16 -07004518 writel(save_val, bp->regview + offset);
Michael Chanb6016b72005-05-26 13:03:09 -07004519 ret = -ENODEV;
4520 break;
4521 }
4522 return ret;
4523}
4524
4525static int
4526bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4527{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004528 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004529 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4530 int i;
4531
4532 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4533 u32 offset;
4534
4535 for (offset = 0; offset < size; offset += 4) {
4536
4537 REG_WR_IND(bp, start + offset, test_pattern[i]);
4538
4539 if (REG_RD_IND(bp, start + offset) !=
4540 test_pattern[i]) {
4541 return -ENODEV;
4542 }
4543 }
4544 }
4545 return 0;
4546}
4547
4548static int
4549bnx2_test_memory(struct bnx2 *bp)
4550{
4551 int ret = 0;
4552 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004553 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004554 u32 offset;
4555 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004556 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004557 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004558 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004559 { 0xe0000, 0x4000 },
4560 { 0x120000, 0x4000 },
4561 { 0x1a0000, 0x4000 },
4562 { 0x160000, 0x4000 },
4563 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004564 },
4565 mem_tbl_5709[] = {
4566 { 0x60000, 0x4000 },
4567 { 0xa0000, 0x3000 },
4568 { 0xe0000, 0x4000 },
4569 { 0x120000, 0x4000 },
4570 { 0x1a0000, 0x4000 },
4571 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004572 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004573 struct mem_entry *mem_tbl;
4574
4575 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4576 mem_tbl = mem_tbl_5709;
4577 else
4578 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004579
4580 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4581 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4582 mem_tbl[i].len)) != 0) {
4583 return ret;
4584 }
4585 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004586
Michael Chanb6016b72005-05-26 13:03:09 -07004587 return ret;
4588}
4589
Michael Chanbc5a0692006-01-23 16:13:22 -08004590#define BNX2_MAC_LOOPBACK 0
4591#define BNX2_PHY_LOOPBACK 1
4592
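/* Loopback self-test: with the MAC or PHY placed in loopback, a 1514-byte
 * frame addressed to the device's own MAC is posted on the TX ring, the
 * coalesce-now command forces status-block updates, and the test passes
 * only if exactly one packet comes back with the expected length, a clean
 * l2_fhdr status and an intact payload pattern.
 */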
Michael Chanb6016b72005-05-26 13:03:09 -07004593static int
Michael Chanbc5a0692006-01-23 16:13:22 -08004594bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
Michael Chanb6016b72005-05-26 13:03:09 -07004595{
4596 unsigned int pkt_size, num_pkts, i;
4597 struct sk_buff *skb, *rx_skb;
4598 unsigned char *packet;
Michael Chanbc5a0692006-01-23 16:13:22 -08004599 u16 rx_start_idx, rx_idx;
Michael Chanb6016b72005-05-26 13:03:09 -07004600 dma_addr_t map;
4601 struct tx_bd *txbd;
4602 struct sw_bd *rx_buf;
4603 struct l2_fhdr *rx_hdr;
4604 int ret = -ENODEV;
4605
Michael Chanbc5a0692006-01-23 16:13:22 -08004606 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4607 bp->loopback = MAC_LOOPBACK;
4608 bnx2_set_mac_loopback(bp);
4609 }
4610 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
Michael Chan80be4432006-11-19 14:07:28 -08004611 bp->loopback = PHY_LOOPBACK;
Michael Chanbc5a0692006-01-23 16:13:22 -08004612 bnx2_set_phy_loopback(bp);
4613 }
4614 else
4615 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07004616
4617 pkt_size = 1514;
Michael Chan932f3772006-08-15 01:39:36 -07004618 skb = netdev_alloc_skb(bp->dev, pkt_size);
John W. Linvilleb6cbc3b62005-11-10 12:58:00 -08004619 if (!skb)
4620 return -ENOMEM;
Michael Chanb6016b72005-05-26 13:03:09 -07004621 packet = skb_put(skb, pkt_size);
Michael Chan66342922006-12-14 15:57:04 -08004622 memcpy(packet, bp->dev->dev_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07004623 memset(packet + 6, 0x0, 8);
4624 for (i = 14; i < pkt_size; i++)
4625 packet[i] = (unsigned char) (i & 0xff);
4626
4627 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4628 PCI_DMA_TODEVICE);
4629
Michael Chanbf5295b2006-03-23 01:11:56 -08004630 REG_WR(bp, BNX2_HC_COMMAND,
4631 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4632
Michael Chanb6016b72005-05-26 13:03:09 -07004633 REG_RD(bp, BNX2_HC_COMMAND);
4634
4635 udelay(5);
4636 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4637
Michael Chanb6016b72005-05-26 13:03:09 -07004638 num_pkts = 0;
4639
Michael Chanbc5a0692006-01-23 16:13:22 -08004640 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
Michael Chanb6016b72005-05-26 13:03:09 -07004641
4642 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4643 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4644 txbd->tx_bd_mss_nbytes = pkt_size;
4645 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4646
4647 num_pkts++;
Michael Chanbc5a0692006-01-23 16:13:22 -08004648 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4649 bp->tx_prod_bseq += pkt_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004650
Michael Chan234754d2006-11-19 14:11:41 -08004651 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4652 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07004653
4654 udelay(100);
4655
Michael Chanbf5295b2006-03-23 01:11:56 -08004656 REG_WR(bp, BNX2_HC_COMMAND,
4657 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4658
Michael Chanb6016b72005-05-26 13:03:09 -07004659 REG_RD(bp, BNX2_HC_COMMAND);
4660
4661 udelay(5);
4662
4663 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
Michael Chan745720e2006-06-29 12:37:41 -07004664 dev_kfree_skb(skb);
Michael Chanb6016b72005-05-26 13:03:09 -07004665
Michael Chanbc5a0692006-01-23 16:13:22 -08004666 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
Michael Chanb6016b72005-05-26 13:03:09 -07004667 goto loopback_test_done;
4668 }
4669
4670 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4671 if (rx_idx != rx_start_idx + num_pkts) {
4672 goto loopback_test_done;
4673 }
4674
4675 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4676 rx_skb = rx_buf->skb;
4677
4678 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4679 skb_reserve(rx_skb, bp->rx_offset);
4680
4681 pci_dma_sync_single_for_cpu(bp->pdev,
4682 pci_unmap_addr(rx_buf, mapping),
4683 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4684
Michael Chanade2bfe2006-01-23 16:09:51 -08004685 if (rx_hdr->l2_fhdr_status &
Michael Chanb6016b72005-05-26 13:03:09 -07004686 (L2_FHDR_ERRORS_BAD_CRC |
4687 L2_FHDR_ERRORS_PHY_DECODE |
4688 L2_FHDR_ERRORS_ALIGNMENT |
4689 L2_FHDR_ERRORS_TOO_SHORT |
4690 L2_FHDR_ERRORS_GIANT_FRAME)) {
4691
4692 goto loopback_test_done;
4693 }
4694
4695 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4696 goto loopback_test_done;
4697 }
4698
4699 for (i = 14; i < pkt_size; i++) {
4700 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4701 goto loopback_test_done;
4702 }
4703 }
4704
4705 ret = 0;
4706
4707loopback_test_done:
4708 bp->loopback = 0;
4709 return ret;
4710}
4711
Michael Chanbc5a0692006-01-23 16:13:22 -08004712#define BNX2_MAC_LOOPBACK_FAILED 1
4713#define BNX2_PHY_LOOPBACK_FAILED 2
4714#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4715 BNX2_PHY_LOOPBACK_FAILED)
4716
4717static int
4718bnx2_test_loopback(struct bnx2 *bp)
4719{
4720 int rc = 0;
4721
4722 if (!netif_running(bp->dev))
4723 return BNX2_LOOPBACK_FAILED;
4724
4725 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4726 spin_lock_bh(&bp->phy_lock);
4727 bnx2_init_phy(bp);
4728 spin_unlock_bh(&bp->phy_lock);
4729 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4730 rc |= BNX2_MAC_LOOPBACK_FAILED;
4731 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4732 rc |= BNX2_PHY_LOOPBACK_FAILED;
4733 return rc;
4734}
4735
Michael Chanb6016b72005-05-26 13:03:09 -07004736#define NVRAM_SIZE 0x200
4737#define CRC32_RESIDUAL 0xdebb20e3
4738
4739static int
4740bnx2_test_nvram(struct bnx2 *bp)
4741{
4742 u32 buf[NVRAM_SIZE / 4];
4743 u8 *data = (u8 *) buf;
4744 int rc = 0;
4745 u32 magic, csum;
4746
4747 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4748 goto test_nvram_done;
4749
4750 magic = be32_to_cpu(buf[0]);
4751 if (magic != 0x669955aa) {
4752 rc = -ENODEV;
4753 goto test_nvram_done;
4754 }
4755
4756 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4757 goto test_nvram_done;
4758
4759 csum = ether_crc_le(0x100, data);
4760 if (csum != CRC32_RESIDUAL) {
4761 rc = -ENODEV;
4762 goto test_nvram_done;
4763 }
4764
4765 csum = ether_crc_le(0x100, data + 0x100);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 }
4769
4770test_nvram_done:
4771 return rc;
4772}
4773
4774static int
4775bnx2_test_link(struct bnx2 *bp)
4776{
4777 u32 bmsr;
4778
Michael Chanc770a652005-08-25 15:38:39 -07004779 spin_lock_bh(&bp->phy_lock);
Michael Chan27a005b2007-05-03 13:23:41 -07004780 bnx2_enable_bmsr1(bp);
4781 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4782 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4783 bnx2_disable_bmsr1(bp);
Michael Chanc770a652005-08-25 15:38:39 -07004784 spin_unlock_bh(&bp->phy_lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004785
Michael Chanb6016b72005-05-26 13:03:09 -07004786 if (bmsr & BMSR_LSTATUS) {
4787 return 0;
4788 }
4789 return -ENODEV;
4790}
4791
4792static int
4793bnx2_test_intr(struct bnx2 *bp)
4794{
4795 int i;
Michael Chanb6016b72005-05-26 13:03:09 -07004796 u16 status_idx;
4797
4798 if (!netif_running(bp->dev))
4799 return -ENODEV;
4800
4801 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4802
4803 /* This register is not touched during run-time. */
Michael Chanbf5295b2006-03-23 01:11:56 -08004804 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
Michael Chanb6016b72005-05-26 13:03:09 -07004805 REG_RD(bp, BNX2_HC_COMMAND);
4806
4807 for (i = 0; i < 10; i++) {
4808 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4809 status_idx) {
4810
4811 break;
4812 }
4813
4814 msleep_interruptible(10);
4815 }
4816 if (i < 10)
4817 return 0;
4818
4819 return -ENODEV;
4820}
4821
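/* 5706 SerDes poll (runs from the periodic timer): while the link is down
 * with autoneg enabled, PHY registers 0x1c/0x17/0x15 are sampled; if signal
 * detect is seen without any received config words, autoneg is dropped and
 * 1000/full is forced (parallel detect).  Once config words appear again
 * the forced mode is undone and autoneg re-enabled.
 */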
4822static void
Michael Chan48b01e22006-11-19 14:08:00 -08004823bnx2_5706_serdes_timer(struct bnx2 *bp)
4824{
4825 spin_lock(&bp->phy_lock);
4826 if (bp->serdes_an_pending)
4827 bp->serdes_an_pending--;
4828 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4829 u32 bmcr;
4830
4831 bp->current_interval = bp->timer_interval;
4832
Michael Chanca58c3a2007-05-03 13:22:52 -07004833 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004834
4835 if (bmcr & BMCR_ANENABLE) {
4836 u32 phy1, phy2;
4837
4838 bnx2_write_phy(bp, 0x1c, 0x7c00);
4839 bnx2_read_phy(bp, 0x1c, &phy1);
4840
4841 bnx2_write_phy(bp, 0x17, 0x0f01);
4842 bnx2_read_phy(bp, 0x15, &phy2);
4843 bnx2_write_phy(bp, 0x17, 0x0f01);
4844 bnx2_read_phy(bp, 0x15, &phy2);
4845
4846 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4847 !(phy2 & 0x20)) { /* no CONFIG */
4848
4849 bmcr &= ~BMCR_ANENABLE;
4850 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
Michael Chanca58c3a2007-05-03 13:22:52 -07004851 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004852 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4853 }
4854 }
4855 }
4856 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4857 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4858 u32 phy2;
4859
4860 bnx2_write_phy(bp, 0x17, 0x0f01);
4861 bnx2_read_phy(bp, 0x15, &phy2);
4862 if (phy2 & 0x20) {
4863 u32 bmcr;
4864
Michael Chanca58c3a2007-05-03 13:22:52 -07004865 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004866 bmcr |= BMCR_ANENABLE;
Michael Chanca58c3a2007-05-03 13:22:52 -07004867 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
Michael Chan48b01e22006-11-19 14:08:00 -08004868
4869 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4870 }
4871 } else
4872 bp->current_interval = bp->timer_interval;
4873
4874 spin_unlock(&bp->phy_lock);
4875}
4876
4877static void
Michael Chanf8dd0642006-11-19 14:08:29 -08004878bnx2_5708_serdes_timer(struct bnx2 *bp)
4879{
Michael Chan0d8a6572007-07-07 22:49:43 -07004880 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4881 return;
4882
Michael Chanf8dd0642006-11-19 14:08:29 -08004883 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4884 bp->serdes_an_pending = 0;
4885 return;
4886 }
4887
4888 spin_lock(&bp->phy_lock);
4889 if (bp->serdes_an_pending)
4890 bp->serdes_an_pending--;
4891 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4892 u32 bmcr;
4893
Michael Chanca58c3a2007-05-03 13:22:52 -07004894 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanf8dd0642006-11-19 14:08:29 -08004895 if (bmcr & BMCR_ANENABLE) {
Michael Chan605a9e22007-05-03 13:23:13 -07004896 bnx2_enable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004897 bp->current_interval = SERDES_FORCED_TIMEOUT;
4898 } else {
Michael Chan605a9e22007-05-03 13:23:13 -07004899 bnx2_disable_forced_2g5(bp);
Michael Chanf8dd0642006-11-19 14:08:29 -08004900 bp->serdes_an_pending = 2;
4901 bp->current_interval = bp->timer_interval;
4902 }
4903
4904 } else
4905 bp->current_interval = bp->timer_interval;
4906
4907 spin_unlock(&bp->phy_lock);
4908}
4909
4910static void
Michael Chanb6016b72005-05-26 13:03:09 -07004911bnx2_timer(unsigned long data)
4912{
4913 struct bnx2 *bp = (struct bnx2 *) data;
4914 u32 msg;
4915
Michael Chancd339a02005-08-25 15:35:24 -07004916 if (!netif_running(bp->dev))
4917 return;
4918
Michael Chanb6016b72005-05-26 13:03:09 -07004919 if (atomic_read(&bp->intr_sem) != 0)
4920 goto bnx2_restart_timer;
4921
4922 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
Michael Chane3648b32005-11-04 08:51:21 -08004923 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
Michael Chanb6016b72005-05-26 13:03:09 -07004924
Michael Chancea94db2006-06-12 22:16:13 -07004925 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4926
Michael Chan02537b062007-06-04 21:24:07 -07004927	/* workaround for occasionally corrupted counters */
4928 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4929 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4930 BNX2_HC_COMMAND_STATS_NOW);
4931
Michael Chanf8dd0642006-11-19 14:08:29 -08004932 if (bp->phy_flags & PHY_SERDES_FLAG) {
4933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4934 bnx2_5706_serdes_timer(bp);
Michael Chan27a005b2007-05-03 13:23:41 -07004935 else
Michael Chanf8dd0642006-11-19 14:08:29 -08004936 bnx2_5708_serdes_timer(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07004937 }
4938
4939bnx2_restart_timer:
Michael Chancd339a02005-08-25 15:35:24 -07004940 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07004941}
4942
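/* Request the device interrupt: MSI (the one-shot handler on the 5709)
 * when enabled, otherwise a shared INTx line.
 */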
Michael Chan8e6a72c2007-05-03 13:24:48 -07004943static int
4944bnx2_request_irq(struct bnx2 *bp)
4945{
4946 struct net_device *dev = bp->dev;
4947 int rc = 0;
4948
4949 if (bp->flags & USING_MSI_FLAG) {
4950 irq_handler_t fn = bnx2_msi;
4951
4952 if (bp->flags & ONE_SHOT_MSI_FLAG)
4953 fn = bnx2_msi_1shot;
4954
4955 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4956 } else
4957 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4958 IRQF_SHARED, dev->name, dev);
4959 return rc;
4960}
4961
4962static void
4963bnx2_free_irq(struct bnx2 *bp)
4964{
4965 struct net_device *dev = bp->dev;
4966
4967 if (bp->flags & USING_MSI_FLAG) {
4968 free_irq(bp->pdev->irq, dev);
4969 pci_disable_msi(bp->pdev);
4970 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4971 } else
4972 free_irq(bp->pdev->irq, dev);
4973}
4974
Michael Chanb6016b72005-05-26 13:03:09 -07004975/* Called with rtnl_lock */
4976static int
4977bnx2_open(struct net_device *dev)
4978{
Michael Chan972ec0d2006-01-23 16:12:43 -08004979 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004980 int rc;
4981
Michael Chan1b2f9222007-05-03 13:20:19 -07004982 netif_carrier_off(dev);
4983
Pavel Machek829ca9a2005-09-03 15:56:56 -07004984 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07004985 bnx2_disable_int(bp);
4986
4987 rc = bnx2_alloc_mem(bp);
4988 if (rc)
4989 return rc;
4990
Michael Chan8e6a72c2007-05-03 13:24:48 -07004991 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
Michael Chanb6016b72005-05-26 13:03:09 -07004992 if (pci_enable_msi(bp->pdev) == 0) {
4993 bp->flags |= USING_MSI_FLAG;
Michael Chan8e6a72c2007-05-03 13:24:48 -07004994 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4995 bp->flags |= ONE_SHOT_MSI_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07004996 }
4997 }
Michael Chan8e6a72c2007-05-03 13:24:48 -07004998 rc = bnx2_request_irq(bp);
4999
Michael Chanb6016b72005-05-26 13:03:09 -07005000 if (rc) {
5001 bnx2_free_mem(bp);
5002 return rc;
5003 }
5004
5005 rc = bnx2_init_nic(bp);
5006
5007 if (rc) {
Michael Chan8e6a72c2007-05-03 13:24:48 -07005008 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005009 bnx2_free_skbs(bp);
5010 bnx2_free_mem(bp);
5011 return rc;
5012 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005013
Michael Chancd339a02005-08-25 15:35:24 -07005014 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005015
5016 atomic_set(&bp->intr_sem, 0);
5017
5018 bnx2_enable_int(bp);
5019
5020 if (bp->flags & USING_MSI_FLAG) {
5021		/* Test MSI to make sure it is working.
5022		 * If the MSI test fails, go back to INTx mode.
5023		 */
5024 if (bnx2_test_intr(bp) != 0) {
5025 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5026 " using MSI, switching to INTx mode. Please"
5027 " report this failure to the PCI maintainer"
5028 " and include system chipset information.\n",
5029 bp->dev->name);
5030
5031 bnx2_disable_int(bp);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005032 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005033
5034 rc = bnx2_init_nic(bp);
5035
Michael Chan8e6a72c2007-05-03 13:24:48 -07005036 if (!rc)
5037 rc = bnx2_request_irq(bp);
5038
Michael Chanb6016b72005-05-26 13:03:09 -07005039 if (rc) {
5040 bnx2_free_skbs(bp);
5041 bnx2_free_mem(bp);
5042 del_timer_sync(&bp->timer);
5043 return rc;
5044 }
5045 bnx2_enable_int(bp);
5046 }
5047 }
5048 if (bp->flags & USING_MSI_FLAG) {
5049 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5050 }
5051
5052 netif_start_queue(dev);
5053
5054 return 0;
5055}
5056
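/* Deferred reset worker: stop the interface, re-initialize the chip and
 * restart it.  bp->in_reset_task lets bnx2_close() wait for this to finish.
 */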
5057static void
David Howellsc4028952006-11-22 14:57:56 +00005058bnx2_reset_task(struct work_struct *work)
Michael Chanb6016b72005-05-26 13:03:09 -07005059{
David Howellsc4028952006-11-22 14:57:56 +00005060 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07005061
Michael Chanafdc08b2005-08-25 15:34:29 -07005062 if (!netif_running(bp->dev))
5063 return;
5064
5065 bp->in_reset_task = 1;
Michael Chanb6016b72005-05-26 13:03:09 -07005066 bnx2_netif_stop(bp);
5067
5068 bnx2_init_nic(bp);
5069
5070 atomic_set(&bp->intr_sem, 1);
5071 bnx2_netif_start(bp);
Michael Chanafdc08b2005-08-25 15:34:29 -07005072 bp->in_reset_task = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005073}
5074
5075static void
5076bnx2_tx_timeout(struct net_device *dev)
5077{
Michael Chan972ec0d2006-01-23 16:12:43 -08005078 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005079
5080	/* This allows the netif to be shut down gracefully before resetting */
5081 schedule_work(&bp->reset_task);
5082}
5083
5084#ifdef BCM_VLAN
5085/* Called with rtnl_lock */
5086static void
5087bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5088{
Michael Chan972ec0d2006-01-23 16:12:43 -08005089 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005090
5091 bnx2_netif_stop(bp);
5092
5093 bp->vlgrp = vlgrp;
5094 bnx2_set_rx_mode(dev);
5095
5096 bnx2_netif_start(bp);
5097}
Michael Chanb6016b72005-05-26 13:03:09 -07005098#endif
5099
Herbert Xu932ff272006-06-09 12:20:56 -07005100/* Called with netif_tx_lock.
Michael Chan2f8af122006-08-15 01:39:10 -07005101 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5102 * netif_wake_queue().
Michael Chanb6016b72005-05-26 13:03:09 -07005103 */
5104static int
5105bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5106{
Michael Chan972ec0d2006-01-23 16:12:43 -08005107 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005108 dma_addr_t mapping;
5109 struct tx_bd *txbd;
5110 struct sw_bd *tx_buf;
5111 u32 len, vlan_tag_flags, last_frag, mss;
5112 u16 prod, ring_prod;
5113 int i;
5114
Michael Chane89bbf12005-08-25 15:36:58 -07005115 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07005116 netif_stop_queue(dev);
5117 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5118 dev->name);
5119
5120 return NETDEV_TX_BUSY;
5121 }
5122 len = skb_headlen(skb);
5123 prod = bp->tx_prod;
5124 ring_prod = TX_RING_IDX(prod);
5125
5126 vlan_tag_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005127 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Michael Chanb6016b72005-05-26 13:03:09 -07005128 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5129 }
5130
5131 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5132 vlan_tag_flags |=
5133 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5134 }
Michael Chanfde82052007-05-03 17:23:35 -07005135 if ((mss = skb_shinfo(skb)->gso_size)) {
Michael Chanb6016b72005-05-26 13:03:09 -07005136 u32 tcp_opt_len, ip_tcp_len;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005137 struct iphdr *iph;
Michael Chanb6016b72005-05-26 13:03:09 -07005138
Michael Chanb6016b72005-05-26 13:03:09 -07005139 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5140
Michael Chan4666f872007-05-03 13:22:28 -07005141 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005142
Michael Chan4666f872007-05-03 13:22:28 -07005143 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5144 u32 tcp_off = skb_transport_offset(skb) -
5145 sizeof(struct ipv6hdr) - ETH_HLEN;
Michael Chanb6016b72005-05-26 13:03:09 -07005146
Michael Chan4666f872007-05-03 13:22:28 -07005147 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5148 TX_BD_FLAGS_SW_FLAGS;
5149 if (likely(tcp_off == 0))
5150 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5151 else {
5152 tcp_off >>= 3;
5153 vlan_tag_flags |= ((tcp_off & 0x3) <<
5154 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5155 ((tcp_off & 0x10) <<
5156 TX_BD_FLAGS_TCP6_OFF4_SHL);
5157 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5158 }
5159 } else {
5160 if (skb_header_cloned(skb) &&
5161 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5162 dev_kfree_skb(skb);
5163 return NETDEV_TX_OK;
5164 }
5165
5166 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5167
5168 iph = ip_hdr(skb);
5169 iph->check = 0;
5170 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5171 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5172 iph->daddr, 0,
5173 IPPROTO_TCP,
5174 0);
5175 if (tcp_opt_len || (iph->ihl > 5)) {
5176 vlan_tag_flags |= ((iph->ihl - 5) +
5177 (tcp_opt_len >> 2)) << 8;
5178 }
Michael Chanb6016b72005-05-26 13:03:09 -07005179 }
Michael Chan4666f872007-05-03 13:22:28 -07005180 } else
Michael Chanb6016b72005-05-26 13:03:09 -07005181 mss = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005182
5183 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005184
Michael Chanb6016b72005-05-26 13:03:09 -07005185 tx_buf = &bp->tx_buf_ring[ring_prod];
5186 tx_buf->skb = skb;
5187 pci_unmap_addr_set(tx_buf, mapping, mapping);
5188
5189 txbd = &bp->tx_desc_ring[ring_prod];
5190
5191 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5192 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5193 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5194 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5195
5196 last_frag = skb_shinfo(skb)->nr_frags;
5197
5198 for (i = 0; i < last_frag; i++) {
5199 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5200
5201 prod = NEXT_TX_BD(prod);
5202 ring_prod = TX_RING_IDX(prod);
5203 txbd = &bp->tx_desc_ring[ring_prod];
5204
5205 len = frag->size;
5206 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5207 len, PCI_DMA_TODEVICE);
5208 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5209 mapping, mapping);
5210
5211 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5212 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5213 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5214 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5215
5216 }
5217 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5218
5219 prod = NEXT_TX_BD(prod);
5220 bp->tx_prod_bseq += skb->len;
5221
Michael Chan234754d2006-11-19 14:11:41 -08005222 REG_WR16(bp, bp->tx_bidx_addr, prod);
5223 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
Michael Chanb6016b72005-05-26 13:03:09 -07005224
5225 mmiowb();
5226
5227 bp->tx_prod = prod;
5228 dev->trans_start = jiffies;
5229
Michael Chane89bbf12005-08-25 15:36:58 -07005230 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chane89bbf12005-08-25 15:36:58 -07005231 netif_stop_queue(dev);
Michael Chan2f8af122006-08-15 01:39:10 -07005232 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
Michael Chane89bbf12005-08-25 15:36:58 -07005233 netif_wake_queue(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005234 }
5235
5236 return NETDEV_TX_OK;
5237}
5238
5239/* Called with rtnl_lock */
5240static int
5241bnx2_close(struct net_device *dev)
5242{
Michael Chan972ec0d2006-01-23 16:12:43 -08005243 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005244 u32 reset_code;
5245
Michael Chanafdc08b2005-08-25 15:34:29 -07005246 /* Calling flush_scheduled_work() may deadlock because
5247 * linkwatch_event() may be on the workqueue and it will try to get
5248 * the rtnl_lock which we are holding.
5249 */
5250 while (bp->in_reset_task)
5251 msleep(1);
5252
Michael Chanb6016b72005-05-26 13:03:09 -07005253 bnx2_netif_stop(bp);
5254 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005255 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07005256 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08005257 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005258 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5259 else
5260 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5261 bnx2_reset_chip(bp, reset_code);
Michael Chan8e6a72c2007-05-03 13:24:48 -07005262 bnx2_free_irq(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07005263 bnx2_free_skbs(bp);
5264 bnx2_free_mem(bp);
5265 bp->link_up = 0;
5266 netif_carrier_off(bp->dev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005267 bnx2_set_power_state(bp, PCI_D3hot);
Michael Chanb6016b72005-05-26 13:03:09 -07005268 return 0;
5269}
5270
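/* Hardware counters are kept as 32-bit high/low pairs.  On 64-bit hosts
 * both halves are combined; on 32-bit hosts only the low 32 bits fit in
 * an unsigned long.
 */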
5271#define GET_NET_STATS64(ctr) \
5272 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5273 (unsigned long) (ctr##_lo)
5274
5275#define GET_NET_STATS32(ctr) \
5276 (ctr##_lo)
5277
5278#if (BITS_PER_LONG == 64)
5279#define GET_NET_STATS GET_NET_STATS64
5280#else
5281#define GET_NET_STATS GET_NET_STATS32
5282#endif
5283
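/* Fill struct net_device_stats from the hardware statistics block,
 * combining 64-bit counters with GET_NET_STATS and zeroing
 * tx_carrier_errors on chips where that counter is not reported
 * (5706, 5708 A0).
 */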
5284static struct net_device_stats *
5285bnx2_get_stats(struct net_device *dev)
5286{
Michael Chan972ec0d2006-01-23 16:12:43 -08005287 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005288 struct statistics_block *stats_blk = bp->stats_blk;
5289 struct net_device_stats *net_stats = &bp->net_stats;
5290
5291 if (bp->stats_blk == NULL) {
5292 return net_stats;
5293 }
5294 net_stats->rx_packets =
5295 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5296 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5297 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5298
5299 net_stats->tx_packets =
5300 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5301 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5302 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5303
5304 net_stats->rx_bytes =
5305 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5306
5307 net_stats->tx_bytes =
5308 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5309
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005310 net_stats->multicast =
Michael Chanb6016b72005-05-26 13:03:09 -07005311 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5312
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005313 net_stats->collisions =
Michael Chanb6016b72005-05-26 13:03:09 -07005314 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5315
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005316 net_stats->rx_length_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005317 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5318 stats_blk->stat_EtherStatsOverrsizePkts);
5319
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005320 net_stats->rx_over_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005321 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5322
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005323 net_stats->rx_frame_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005324 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5325
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005326 net_stats->rx_crc_errors =
Michael Chanb6016b72005-05-26 13:03:09 -07005327 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5328
5329 net_stats->rx_errors = net_stats->rx_length_errors +
5330 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5331 net_stats->rx_crc_errors;
5332
5333 net_stats->tx_aborted_errors =
5334 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5335 stats_blk->stat_Dot3StatsLateCollisions);
5336
Michael Chan5b0c76a2005-11-04 08:45:49 -08005337 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5338 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005339 net_stats->tx_carrier_errors = 0;
5340 else {
5341 net_stats->tx_carrier_errors =
5342 (unsigned long)
5343 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5344 }
5345
5346 net_stats->tx_errors =
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005347 (unsigned long)
Michael Chanb6016b72005-05-26 13:03:09 -07005348 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5349 +
5350 net_stats->tx_aborted_errors +
5351 net_stats->tx_carrier_errors;
5352
Michael Chancea94db2006-06-12 22:16:13 -07005353 net_stats->rx_missed_errors =
5354 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5355 stats_blk->stat_FwRxDrop);
5356
Michael Chanb6016b72005-05-26 13:03:09 -07005357 return net_stats;
5358}
5359
5360/* All ethtool functions called with rtnl_lock */
5361
5362static int
5363bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5364{
Michael Chan972ec0d2006-01-23 16:12:43 -08005365 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005366
5367 cmd->supported = SUPPORTED_Autoneg;
5368 if (bp->phy_flags & PHY_SERDES_FLAG) {
5369 cmd->supported |= SUPPORTED_1000baseT_Full |
5370 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005371 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5372 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005373
5374 cmd->port = PORT_FIBRE;
5375 }
5376 else {
5377 cmd->supported |= SUPPORTED_10baseT_Half |
5378 SUPPORTED_10baseT_Full |
5379 SUPPORTED_100baseT_Half |
5380 SUPPORTED_100baseT_Full |
5381 SUPPORTED_1000baseT_Full |
5382 SUPPORTED_TP;
5383
5384 cmd->port = PORT_TP;
5385 }
5386
5387 cmd->advertising = bp->advertising;
5388
5389 if (bp->autoneg & AUTONEG_SPEED) {
5390 cmd->autoneg = AUTONEG_ENABLE;
5391 }
5392 else {
5393 cmd->autoneg = AUTONEG_DISABLE;
5394 }
5395
5396 if (netif_carrier_ok(dev)) {
5397 cmd->speed = bp->line_speed;
5398 cmd->duplex = bp->duplex;
5399 }
5400 else {
5401 cmd->speed = -1;
5402 cmd->duplex = -1;
5403 }
5404
5405 cmd->transceiver = XCVR_INTERNAL;
5406 cmd->phy_address = bp->phy_addr;
5407
5408 return 0;
5409}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005410
Michael Chanb6016b72005-05-26 13:03:09 -07005411static int
5412bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5413{
Michael Chan972ec0d2006-01-23 16:12:43 -08005414 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005415 u8 autoneg = bp->autoneg;
5416 u8 req_duplex = bp->req_duplex;
5417 u16 req_line_speed = bp->req_line_speed;
5418 u32 advertising = bp->advertising;
5419
5420 if (cmd->autoneg == AUTONEG_ENABLE) {
5421 autoneg |= AUTONEG_SPEED;
5422
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005423 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
Michael Chanb6016b72005-05-26 13:03:09 -07005424
5425 /* allow advertising 1 speed */
5426 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5427 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5428 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5429 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5430
5431 if (bp->phy_flags & PHY_SERDES_FLAG)
5432 return -EINVAL;
5433
5434 advertising = cmd->advertising;
5435
Michael Chan27a005b2007-05-03 13:23:41 -07005436 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5437 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5438 return -EINVAL;
5439 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
Michael Chanb6016b72005-05-26 13:03:09 -07005440 advertising = cmd->advertising;
5441 }
5442 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5443 return -EINVAL;
5444 }
5445 else {
5446 if (bp->phy_flags & PHY_SERDES_FLAG) {
5447 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5448 }
5449 else {
5450 advertising = ETHTOOL_ALL_COPPER_SPEED;
5451 }
5452 }
5453 advertising |= ADVERTISED_Autoneg;
5454 }
5455 else {
5456 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan80be4432006-11-19 14:07:28 -08005457 if ((cmd->speed != SPEED_1000 &&
5458 cmd->speed != SPEED_2500) ||
5459 (cmd->duplex != DUPLEX_FULL))
Michael Chanb6016b72005-05-26 13:03:09 -07005460 return -EINVAL;
Michael Chan80be4432006-11-19 14:07:28 -08005461
5462 if (cmd->speed == SPEED_2500 &&
5463 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5464 return -EINVAL;
Michael Chanb6016b72005-05-26 13:03:09 -07005465 }
5466 else if (cmd->speed == SPEED_1000) {
5467 return -EINVAL;
5468 }
5469 autoneg &= ~AUTONEG_SPEED;
5470 req_line_speed = cmd->speed;
5471 req_duplex = cmd->duplex;
5472 advertising = 0;
5473 }
5474
5475 bp->autoneg = autoneg;
5476 bp->advertising = advertising;
5477 bp->req_line_speed = req_line_speed;
5478 bp->req_duplex = req_duplex;
5479
Michael Chanc770a652005-08-25 15:38:39 -07005480 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005481
Michael Chan0d8a6572007-07-07 22:49:43 -07005482 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07005483
Michael Chanc770a652005-08-25 15:38:39 -07005484 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005485
5486 return 0;
5487}
5488
5489static void
5490bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5491{
Michael Chan972ec0d2006-01-23 16:12:43 -08005492 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005493
5494 strcpy(info->driver, DRV_MODULE_NAME);
5495 strcpy(info->version, DRV_MODULE_VERSION);
5496 strcpy(info->bus_info, pci_name(bp->pdev));
5497 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5498 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5499 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005500 info->fw_version[1] = info->fw_version[3] = '.';
5501 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005502}
5503
Michael Chan244ac4f2006-03-20 17:48:46 -08005504#define BNX2_REGDUMP_LEN (32 * 1024)
5505
5506static int
5507bnx2_get_regs_len(struct net_device *dev)
5508{
5509 return BNX2_REGDUMP_LEN;
5510}
5511
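/* Dump the register space for ethtool.  reg_boundaries[] holds pairs of
 * [start, end) offsets to read; regions between the pairs are left as
 * zero in the output buffer.
 */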
5512static void
5513bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5514{
5515 u32 *p = _p, i, offset;
5516 u8 *orig_p = _p;
5517 struct bnx2 *bp = netdev_priv(dev);
5518 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5519 0x0800, 0x0880, 0x0c00, 0x0c10,
5520 0x0c30, 0x0d08, 0x1000, 0x101c,
5521 0x1040, 0x1048, 0x1080, 0x10a4,
5522 0x1400, 0x1490, 0x1498, 0x14f0,
5523 0x1500, 0x155c, 0x1580, 0x15dc,
5524 0x1600, 0x1658, 0x1680, 0x16d8,
5525 0x1800, 0x1820, 0x1840, 0x1854,
5526 0x1880, 0x1894, 0x1900, 0x1984,
5527 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5528 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5529 0x2000, 0x2030, 0x23c0, 0x2400,
5530 0x2800, 0x2820, 0x2830, 0x2850,
5531 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5532 0x3c00, 0x3c94, 0x4000, 0x4010,
5533 0x4080, 0x4090, 0x43c0, 0x4458,
5534 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5535 0x4fc0, 0x5010, 0x53c0, 0x5444,
5536 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5537 0x5fc0, 0x6000, 0x6400, 0x6428,
5538 0x6800, 0x6848, 0x684c, 0x6860,
5539 0x6888, 0x6910, 0x8000 };
5540
5541 regs->version = 0;
5542
5543 memset(p, 0, BNX2_REGDUMP_LEN);
5544
5545 if (!netif_running(bp->dev))
5546 return;
5547
5548 i = 0;
5549 offset = reg_boundaries[0];
5550 p += offset;
5551 while (offset < BNX2_REGDUMP_LEN) {
5552 *p++ = REG_RD(bp, offset);
5553 offset += 4;
5554 if (offset == reg_boundaries[i + 1]) {
5555 offset = reg_boundaries[i + 2];
5556 p = (u32 *) (orig_p + offset);
5557 i += 2;
5558 }
5559 }
5560}
5561
Michael Chanb6016b72005-05-26 13:03:09 -07005562static void
5563bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5564{
Michael Chan972ec0d2006-01-23 16:12:43 -08005565 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005566
5567 if (bp->flags & NO_WOL_FLAG) {
5568 wol->supported = 0;
5569 wol->wolopts = 0;
5570 }
5571 else {
5572 wol->supported = WAKE_MAGIC;
5573 if (bp->wol)
5574 wol->wolopts = WAKE_MAGIC;
5575 else
5576 wol->wolopts = 0;
5577 }
5578 memset(&wol->sopass, 0, sizeof(wol->sopass));
5579}
5580
5581static int
5582bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5583{
Michael Chan972ec0d2006-01-23 16:12:43 -08005584 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005585
5586 if (wol->wolopts & ~WAKE_MAGIC)
5587 return -EINVAL;
5588
5589 if (wol->wolopts & WAKE_MAGIC) {
5590 if (bp->flags & NO_WOL_FLAG)
5591 return -EINVAL;
5592
5593 bp->wol = 1;
5594 }
5595 else {
5596 bp->wol = 0;
5597 }
5598 return 0;
5599}
5600
5601static int
5602bnx2_nway_reset(struct net_device *dev)
5603{
Michael Chan972ec0d2006-01-23 16:12:43 -08005604 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005605 u32 bmcr;
5606
5607 if (!(bp->autoneg & AUTONEG_SPEED)) {
5608 return -EINVAL;
5609 }
5610
Michael Chanc770a652005-08-25 15:38:39 -07005611 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005612
5613 /* Force a link down visible on the other side */
5614 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanca58c3a2007-05-03 13:22:52 -07005615 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
Michael Chanc770a652005-08-25 15:38:39 -07005616 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005617
5618 msleep(20);
5619
Michael Chanc770a652005-08-25 15:38:39 -07005620 spin_lock_bh(&bp->phy_lock);
Michael Chanf8dd0642006-11-19 14:08:29 -08005621
5622 bp->current_interval = SERDES_AN_TIMEOUT;
5623 bp->serdes_an_pending = 1;
5624 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chanb6016b72005-05-26 13:03:09 -07005625 }
5626
Michael Chanca58c3a2007-05-03 13:22:52 -07005627 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -07005628 bmcr &= ~BMCR_LOOPBACK;
Michael Chanca58c3a2007-05-03 13:22:52 -07005629 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
Michael Chanb6016b72005-05-26 13:03:09 -07005630
Michael Chanc770a652005-08-25 15:38:39 -07005631 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005632
5633 return 0;
5634}
5635
5636static int
5637bnx2_get_eeprom_len(struct net_device *dev)
5638{
Michael Chan972ec0d2006-01-23 16:12:43 -08005639 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005640
Michael Chan1122db72006-01-23 16:11:42 -08005641 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005642 return 0;
5643
Michael Chan1122db72006-01-23 16:11:42 -08005644 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005645}
5646
5647static int
5648bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5649 u8 *eebuf)
5650{
Michael Chan972ec0d2006-01-23 16:12:43 -08005651 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005652 int rc;
5653
John W. Linville1064e942005-11-10 12:58:24 -08005654 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005655
5656 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5657
5658 return rc;
5659}
5660
5661static int
5662bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5663 u8 *eebuf)
5664{
Michael Chan972ec0d2006-01-23 16:12:43 -08005665 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005666 int rc;
5667
John W. Linville1064e942005-11-10 12:58:24 -08005668 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005669
5670 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5671
5672 return rc;
5673}
5674
5675static int
5676bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5677{
Michael Chan972ec0d2006-01-23 16:12:43 -08005678 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005679
5680 memset(coal, 0, sizeof(struct ethtool_coalesce));
5681
5682 coal->rx_coalesce_usecs = bp->rx_ticks;
5683 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5684 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5685 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5686
5687 coal->tx_coalesce_usecs = bp->tx_ticks;
5688 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5689 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5690 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5691
5692 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5693
5694 return 0;
5695}
5696
5697static int
5698bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5699{
Michael Chan972ec0d2006-01-23 16:12:43 -08005700 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005701
5702 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5703 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5704
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005705 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005706 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5707
5708 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5709 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5710
5711 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5712 if (bp->rx_quick_cons_trip_int > 0xff)
5713 bp->rx_quick_cons_trip_int = 0xff;
5714
5715 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5716 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5717
5718 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5719 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5720
5721 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5722 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5723
5724 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5725 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5726 0xff;
5727
5728 bp->stats_ticks = coal->stats_block_coalesce_usecs;
Michael Chan02537b062007-06-04 21:24:07 -07005729 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5730 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5731 bp->stats_ticks = USEC_PER_SEC;
5732 }
Michael Chanb6016b72005-05-26 13:03:09 -07005733 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5734 bp->stats_ticks &= 0xffff00;
5735
5736 if (netif_running(bp->dev)) {
5737 bnx2_netif_stop(bp);
5738 bnx2_init_nic(bp);
5739 bnx2_netif_start(bp);
5740 }
5741
5742 return 0;
5743}
5744
5745static void
5746bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5747{
Michael Chan972ec0d2006-01-23 16:12:43 -08005748 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005749
Michael Chan13daffa2006-03-20 17:49:20 -08005750 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005751 ering->rx_mini_max_pending = 0;
5752 ering->rx_jumbo_max_pending = 0;
5753
5754 ering->rx_pending = bp->rx_ring_size;
5755 ering->rx_mini_pending = 0;
5756 ering->rx_jumbo_pending = 0;
5757
5758 ering->tx_max_pending = MAX_TX_DESC_CNT;
5759 ering->tx_pending = bp->tx_ring_size;
5760}
5761
5762static int
5763bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5764{
Michael Chan972ec0d2006-01-23 16:12:43 -08005765 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005766
Michael Chan13daffa2006-03-20 17:49:20 -08005767 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
Michael Chanb6016b72005-05-26 13:03:09 -07005768 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5769 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5770
5771 return -EINVAL;
5772 }
Michael Chan13daffa2006-03-20 17:49:20 -08005773 if (netif_running(bp->dev)) {
5774 bnx2_netif_stop(bp);
5775 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5776 bnx2_free_skbs(bp);
5777 bnx2_free_mem(bp);
5778 }
5779
5780 bnx2_set_rx_ring_size(bp, ering->rx_pending);
Michael Chanb6016b72005-05-26 13:03:09 -07005781 bp->tx_ring_size = ering->tx_pending;
5782
5783 if (netif_running(bp->dev)) {
Michael Chan13daffa2006-03-20 17:49:20 -08005784 int rc;
5785
5786 rc = bnx2_alloc_mem(bp);
5787 if (rc)
5788 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07005789 bnx2_init_nic(bp);
5790 bnx2_netif_start(bp);
5791 }
5792
5793 return 0;
5794}
5795
5796static void
5797bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5798{
Michael Chan972ec0d2006-01-23 16:12:43 -08005799 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005800
5801 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5802 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5803 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5804}
5805
5806static int
5807bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5808{
Michael Chan972ec0d2006-01-23 16:12:43 -08005809 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005810
5811 bp->req_flow_ctrl = 0;
5812 if (epause->rx_pause)
5813 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5814 if (epause->tx_pause)
5815 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5816
5817 if (epause->autoneg) {
5818 bp->autoneg |= AUTONEG_FLOW_CTRL;
5819 }
5820 else {
5821 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5822 }
5823
Michael Chanc770a652005-08-25 15:38:39 -07005824 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005825
Michael Chan0d8a6572007-07-07 22:49:43 -07005826 bnx2_setup_phy(bp, bp->phy_port);
Michael Chanb6016b72005-05-26 13:03:09 -07005827
Michael Chanc770a652005-08-25 15:38:39 -07005828 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005829
5830 return 0;
5831}
5832
5833static u32
5834bnx2_get_rx_csum(struct net_device *dev)
5835{
Michael Chan972ec0d2006-01-23 16:12:43 -08005836 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005837
5838 return bp->rx_csum;
5839}
5840
5841static int
5842bnx2_set_rx_csum(struct net_device *dev, u32 data)
5843{
Michael Chan972ec0d2006-01-23 16:12:43 -08005844 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005845
5846 bp->rx_csum = data;
5847 return 0;
5848}
5849
Michael Chanb11d6212006-06-29 12:31:21 -07005850static int
5851bnx2_set_tso(struct net_device *dev, u32 data)
5852{
Michael Chan4666f872007-05-03 13:22:28 -07005853 struct bnx2 *bp = netdev_priv(dev);
5854
5855 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005856 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005857 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5858 dev->features |= NETIF_F_TSO6;
5859 } else
5860 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5861 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005862 return 0;
5863}
5864
Michael Chancea94db2006-06-12 22:16:13 -07005865#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005866
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005867static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005868 char string[ETH_GSTRING_LEN];
5869} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5870 { "rx_bytes" },
5871 { "rx_error_bytes" },
5872 { "tx_bytes" },
5873 { "tx_error_bytes" },
5874 { "rx_ucast_packets" },
5875 { "rx_mcast_packets" },
5876 { "rx_bcast_packets" },
5877 { "tx_ucast_packets" },
5878 { "tx_mcast_packets" },
5879 { "tx_bcast_packets" },
5880 { "tx_mac_errors" },
5881 { "tx_carrier_errors" },
5882 { "rx_crc_errors" },
5883 { "rx_align_errors" },
5884 { "tx_single_collisions" },
5885 { "tx_multi_collisions" },
5886 { "tx_deferred" },
5887 { "tx_excess_collisions" },
5888 { "tx_late_collisions" },
5889 { "tx_total_collisions" },
5890 { "rx_fragments" },
5891 { "rx_jabbers" },
5892 { "rx_undersize_packets" },
5893 { "rx_oversize_packets" },
5894 { "rx_64_byte_packets" },
5895 { "rx_65_to_127_byte_packets" },
5896 { "rx_128_to_255_byte_packets" },
5897 { "rx_256_to_511_byte_packets" },
5898 { "rx_512_to_1023_byte_packets" },
5899 { "rx_1024_to_1522_byte_packets" },
5900 { "rx_1523_to_9022_byte_packets" },
5901 { "tx_64_byte_packets" },
5902 { "tx_65_to_127_byte_packets" },
5903 { "tx_128_to_255_byte_packets" },
5904 { "tx_256_to_511_byte_packets" },
5905 { "tx_512_to_1023_byte_packets" },
5906 { "tx_1024_to_1522_byte_packets" },
5907 { "tx_1523_to_9022_byte_packets" },
5908 { "rx_xon_frames" },
5909 { "rx_xoff_frames" },
5910 { "tx_xon_frames" },
5911 { "tx_xoff_frames" },
5912 { "rx_mac_ctrl_frames" },
5913 { "rx_filtered_packets" },
5914 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005915 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005916};
5917
5918#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5919
Arjan van de Venf71e1302006-03-03 21:33:57 -05005920static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005921 STATS_OFFSET32(stat_IfHCInOctets_hi),
5922 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5923 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5924 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5925 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5926 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5927 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5928 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5929 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5930 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5931 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005932 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5933 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5934 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5935 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5936 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5937 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5938 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5939 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5940 STATS_OFFSET32(stat_EtherStatsCollisions),
5941 STATS_OFFSET32(stat_EtherStatsFragments),
5942 STATS_OFFSET32(stat_EtherStatsJabbers),
5943 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5944 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5945 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5946 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5947 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5948 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5949 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5950 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5951 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5952 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5953 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5954 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5955 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5956 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5957 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5958 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5959 STATS_OFFSET32(stat_XonPauseFramesReceived),
5960 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5961 STATS_OFFSET32(stat_OutXonSent),
5962 STATS_OFFSET32(stat_OutXoffSent),
5963 STATS_OFFSET32(stat_MacControlFramesReceived),
5964 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5965 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005966 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005967};
5968
5969/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5970 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005971 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005972static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005973 8,0,8,8,8,8,8,8,8,8,
5974 4,0,4,4,4,4,4,4,4,4,
5975 4,4,4,4,4,4,4,4,4,4,
5976 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005977 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005978};
5979
Michael Chan5b0c76a2005-11-04 08:45:49 -08005980static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5981 8,0,8,8,8,8,8,8,8,8,
5982 4,4,4,4,4,4,4,4,4,4,
5983 4,4,4,4,4,4,4,4,4,4,
5984 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005985 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005986};
5987
Michael Chanb6016b72005-05-26 13:03:09 -07005988#define BNX2_NUM_TESTS 6
5989
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005990static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005991 char string[ETH_GSTRING_LEN];
5992} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5993 { "register_test (offline)" },
5994 { "memory_test (offline)" },
5995 { "loopback_test (offline)" },
5996 { "nvram_test (online)" },
5997 { "interrupt_test (online)" },
5998 { "link_test (online)" },
5999};
6000
6001static int
6002bnx2_self_test_count(struct net_device *dev)
6003{
6004 return BNX2_NUM_TESTS;
6005}
6006
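/* ethtool self-test.  Offline tests reset the chip and run the register,
 * memory and loopback tests; the NVRAM, interrupt and link tests run in
 * either mode.
 */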
6007static void
6008bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6009{
Michael Chan972ec0d2006-01-23 16:12:43 -08006010 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006011
6012 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6013 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chan80be4432006-11-19 14:07:28 -08006014 int i;
6015
Michael Chanb6016b72005-05-26 13:03:09 -07006016 bnx2_netif_stop(bp);
6017 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6018 bnx2_free_skbs(bp);
6019
6020 if (bnx2_test_registers(bp) != 0) {
6021 buf[0] = 1;
6022 etest->flags |= ETH_TEST_FL_FAILED;
6023 }
6024 if (bnx2_test_memory(bp) != 0) {
6025 buf[1] = 1;
6026 etest->flags |= ETH_TEST_FL_FAILED;
6027 }
Michael Chanbc5a0692006-01-23 16:13:22 -08006028 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
Michael Chanb6016b72005-05-26 13:03:09 -07006029 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chanb6016b72005-05-26 13:03:09 -07006030
6031 if (!netif_running(bp->dev)) {
6032 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6033 }
6034 else {
6035 bnx2_init_nic(bp);
6036 bnx2_netif_start(bp);
6037 }
6038
6039 /* wait for link up */
Michael Chan80be4432006-11-19 14:07:28 -08006040 for (i = 0; i < 7; i++) {
6041 if (bp->link_up)
6042 break;
6043 msleep_interruptible(1000);
6044 }
Michael Chanb6016b72005-05-26 13:03:09 -07006045 }
6046
6047 if (bnx2_test_nvram(bp) != 0) {
6048 buf[3] = 1;
6049 etest->flags |= ETH_TEST_FL_FAILED;
6050 }
6051 if (bnx2_test_intr(bp) != 0) {
6052 buf[4] = 1;
6053 etest->flags |= ETH_TEST_FL_FAILED;
6054 }
6055
6056 if (bnx2_test_link(bp) != 0) {
6057 buf[5] = 1;
6058 etest->flags |= ETH_TEST_FL_FAILED;
6059
6060 }
6061}
6062
6063static void
6064bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6065{
6066 switch (stringset) {
6067 case ETH_SS_STATS:
6068 memcpy(buf, bnx2_stats_str_arr,
6069 sizeof(bnx2_stats_str_arr));
6070 break;
6071 case ETH_SS_TEST:
6072 memcpy(buf, bnx2_tests_str_arr,
6073 sizeof(bnx2_tests_str_arr));
6074 break;
6075 }
6076}
6077
6078static int
6079bnx2_get_stats_count(struct net_device *dev)
6080{
6081 return BNX2_NUM_STATS;
6082}
6083
6084static void
6085bnx2_get_ethtool_stats(struct net_device *dev,
6086 struct ethtool_stats *stats, u64 *buf)
6087{
Michael Chan972ec0d2006-01-23 16:12:43 -08006088 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006089 int i;
6090 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006091 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006092
6093 if (hw_stats == NULL) {
6094 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6095 return;
6096 }
6097
Michael Chan5b0c76a2005-11-04 08:45:49 -08006098 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6099 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6100 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6101 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07006102 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08006103 else
6104 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07006105
6106 for (i = 0; i < BNX2_NUM_STATS; i++) {
6107 if (stats_len_arr[i] == 0) {
6108 /* skip this counter */
6109 buf[i] = 0;
6110 continue;
6111 }
6112 if (stats_len_arr[i] == 4) {
6113 /* 4-byte counter */
6114 buf[i] = (u64)
6115 *(hw_stats + bnx2_stats_offset_arr[i]);
6116 continue;
6117 }
6118 /* 8-byte counter */
6119 buf[i] = (((u64) *(hw_stats +
6120 bnx2_stats_offset_arr[i])) << 32) +
6121 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6122 }
6123}
6124
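/* ethtool LED identify: blink the port LEDs by toggling the EMAC LED
 * override bits every 500 ms for 'data' seconds (default 2), then restore
 * the saved LED configuration.
 */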
6125static int
6126bnx2_phys_id(struct net_device *dev, u32 data)
6127{
Michael Chan972ec0d2006-01-23 16:12:43 -08006128 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006129 int i;
6130 u32 save;
6131
6132 if (data == 0)
6133 data = 2;
6134
6135 save = REG_RD(bp, BNX2_MISC_CFG);
6136 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6137
6138 for (i = 0; i < (data * 2); i++) {
6139 if ((i % 2) == 0) {
6140 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6141 }
6142 else {
6143 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6144 BNX2_EMAC_LED_1000MB_OVERRIDE |
6145 BNX2_EMAC_LED_100MB_OVERRIDE |
6146 BNX2_EMAC_LED_10MB_OVERRIDE |
6147 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6148 BNX2_EMAC_LED_TRAFFIC);
6149 }
6150 msleep_interruptible(500);
6151 if (signal_pending(current))
6152 break;
6153 }
6154 REG_WR(bp, BNX2_EMAC_LED, 0);
6155 REG_WR(bp, BNX2_MISC_CFG, save);
6156 return 0;
6157}
6158
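/* The 5709 supports generic hardware checksumming (NETIF_F_HW_CSUM);
 * older chips only offload IPv4 TCP/UDP checksums (NETIF_F_IP_CSUM).
 */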
Michael Chan4666f872007-05-03 13:22:28 -07006159static int
6160bnx2_set_tx_csum(struct net_device *dev, u32 data)
6161{
6162 struct bnx2 *bp = netdev_priv(dev);
6163
6164 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6165 return (ethtool_op_set_tx_hw_csum(dev, data));
6166 else
6167 return (ethtool_op_set_tx_csum(dev, data));
6168}
6169
Jeff Garzik7282d492006-09-13 14:30:00 -04006170static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07006171 .get_settings = bnx2_get_settings,
6172 .set_settings = bnx2_set_settings,
6173 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08006174 .get_regs_len = bnx2_get_regs_len,
6175 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07006176 .get_wol = bnx2_get_wol,
6177 .set_wol = bnx2_set_wol,
6178 .nway_reset = bnx2_nway_reset,
6179 .get_link = ethtool_op_get_link,
6180 .get_eeprom_len = bnx2_get_eeprom_len,
6181 .get_eeprom = bnx2_get_eeprom,
6182 .set_eeprom = bnx2_set_eeprom,
6183 .get_coalesce = bnx2_get_coalesce,
6184 .set_coalesce = bnx2_set_coalesce,
6185 .get_ringparam = bnx2_get_ringparam,
6186 .set_ringparam = bnx2_set_ringparam,
6187 .get_pauseparam = bnx2_get_pauseparam,
6188 .set_pauseparam = bnx2_set_pauseparam,
6189 .get_rx_csum = bnx2_get_rx_csum,
6190 .set_rx_csum = bnx2_set_rx_csum,
6191 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07006192 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07006193 .get_sg = ethtool_op_get_sg,
6194 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07006195 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07006196 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07006197 .self_test_count = bnx2_self_test_count,
6198 .self_test = bnx2_self_test,
6199 .get_strings = bnx2_get_strings,
6200 .phys_id = bnx2_phys_id,
6201 .get_stats_count = bnx2_get_stats_count,
6202 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07006203 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07006204};
6205
6206/* Called with rtnl_lock */
6207static int
6208bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6209{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006210 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08006211 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006212 int err;
6213
6214 switch(cmd) {
6215 case SIOCGMIIPHY:
6216 data->phy_id = bp->phy_addr;
6217
6218 /* fallthru */
6219 case SIOCGMIIREG: {
6220 u32 mii_regval;
6221
Michael Chandad3e452007-05-03 13:18:03 -07006222 if (!netif_running(dev))
6223 return -EAGAIN;
6224
Michael Chanc770a652005-08-25 15:38:39 -07006225 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006226 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07006227 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006228
6229 data->val_out = mii_regval;
6230
6231 return err;
6232 }
6233
6234 case SIOCSMIIREG:
6235 if (!capable(CAP_NET_ADMIN))
6236 return -EPERM;
6237
Michael Chandad3e452007-05-03 13:18:03 -07006238 if (!netif_running(dev))
6239 return -EAGAIN;
6240
Michael Chanc770a652005-08-25 15:38:39 -07006241 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006242 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07006243 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07006244
6245 return err;
6246
6247 default:
6248 /* do nothing */
6249 break;
6250 }
6251 return -EOPNOTSUPP;
6252}
6253
6254/* Called with rtnl_lock */
6255static int
6256bnx2_change_mac_addr(struct net_device *dev, void *p)
6257{
6258 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08006259 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006260
Michael Chan73eef4c2005-08-25 15:39:15 -07006261 if (!is_valid_ether_addr(addr->sa_data))
6262 return -EINVAL;
6263
Michael Chanb6016b72005-05-26 13:03:09 -07006264 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6265 if (netif_running(dev))
6266 bnx2_set_mac_addr(bp);
6267
6268 return 0;
6269}
6270
6271/* Called with rtnl_lock */
6272static int
6273bnx2_change_mtu(struct net_device *dev, int new_mtu)
6274{
Michael Chan972ec0d2006-01-23 16:12:43 -08006275 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006276
6277 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6278 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6279 return -EINVAL;
6280
6281 dev->mtu = new_mtu;
6282 if (netif_running(dev)) {
6283 bnx2_netif_stop(bp);
6284
6285 bnx2_init_nic(bp);
6286
6287 bnx2_netif_start(bp);
6288 }
6289 return 0;
6290}
6291
6292#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6293static void
6294poll_bnx2(struct net_device *dev)
6295{
Michael Chan972ec0d2006-01-23 16:12:43 -08006296 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006297
6298 disable_irq(bp->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01006299 bnx2_interrupt(bp->pdev->irq, dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006300 enable_irq(bp->pdev->irq);
6301}
6302#endif
6303
Michael Chan253c8b72007-01-08 19:56:01 -08006304static void __devinit
6305bnx2_get_5709_media(struct bnx2 *bp)
6306{
6307 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6308 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6309 u32 strap;
6310
6311 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6312 return;
6313 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6314 bp->phy_flags |= PHY_SERDES_FLAG;
6315 return;
6316 }
6317
6318 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6319 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6320 else
6321 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6322
6323 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6324 switch (strap) {
6325 case 0x4:
6326 case 0x5:
6327 case 0x6:
6328 bp->phy_flags |= PHY_SERDES_FLAG;
6329 return;
6330 }
6331 } else {
6332 switch (strap) {
6333 case 0x1:
6334 case 0x2:
6335 case 0x4:
6336 bp->phy_flags |= PHY_SERDES_FLAG;
6337 return;
6338 }
6339 }
6340}
6341
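/* Determine whether the device sits on a PCI or PCI-X bus, and its bus
 * width and clock speed, from the PCICFG misc status and clock control
 * registers.
 */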
Michael Chan883e5152007-05-03 13:25:11 -07006342static void __devinit
6343bnx2_get_pci_speed(struct bnx2 *bp)
6344{
6345 u32 reg;
6346
6347 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6348 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6349 u32 clkreg;
6350
6351 bp->flags |= PCIX_FLAG;
6352
6353 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6354
6355 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6356 switch (clkreg) {
6357 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6358 bp->bus_speed_mhz = 133;
6359 break;
6360
6361 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6362 bp->bus_speed_mhz = 100;
6363 break;
6364
6365 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6366 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6367 bp->bus_speed_mhz = 66;
6368 break;
6369
6370 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6371 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6372 bp->bus_speed_mhz = 50;
6373 break;
6374
6375 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6376 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6377 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6378 bp->bus_speed_mhz = 33;
6379 break;
6380 }
6381 }
6382 else {
6383 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6384 bp->bus_speed_mhz = 66;
6385 else
6386 bp->bus_speed_mhz = 33;
6387 }
6388
6389 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6390 bp->flags |= PCI_32BIT_FLAG;
6391
6392}
6393
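/* One-time board setup at probe time: enable the PCI device, map the
 * register window, set up the DMA mask, locate the firmware shared memory
 * and read the permanent MAC address and other configuration from it.
 */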
Michael Chanb6016b72005-05-26 13:03:09 -07006394static int __devinit
6395bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6396{
6397 struct bnx2 *bp;
6398 unsigned long mem_len;
6399 int rc;
6400 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006401 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006402
6403 SET_MODULE_OWNER(dev);
6404 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006405 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006406
6407 bp->flags = 0;
6408 bp->phy_flags = 0;
6409
6410 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6411 rc = pci_enable_device(pdev);
6412 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006413		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006414 goto err_out;
6415 }
6416
6417 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006418 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006419 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006420 rc = -ENODEV;
6421 goto err_out_disable;
6422 }
6423
6424 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6425 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006426 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006427 goto err_out_disable;
6428 }
6429
6430 pci_set_master(pdev);
6431
6432 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6433 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006434 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006435 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006436 rc = -EIO;
6437 goto err_out_release;
6438 }
6439
Michael Chanb6016b72005-05-26 13:03:09 -07006440 bp->dev = dev;
6441 bp->pdev = pdev;
6442
6443 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006444 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006445 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006446
6447 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006448 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006449 dev->mem_end = dev->mem_start + mem_len;
6450 dev->irq = pdev->irq;
6451
6452 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6453
6454 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006455 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006456 rc = -ENOMEM;
6457 goto err_out_release;
6458 }
6459
6460 /* Configure byte swap and enable write to the reg_window registers.
6461	 * Rely on the CPU to do target byte swapping on big endian systems;
6462	 * the chip's target access swapping will not swap all accesses.
6463 */
6464 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6465 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6466 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6467
Pavel Machek829ca9a2005-09-03 15:56:56 -07006468 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006469
6470 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6471
Michael Chan883e5152007-05-03 13:25:11 -07006472 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6473 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6474 dev_err(&pdev->dev,
6475 "Cannot find PCIE capability, aborting.\n");
6476 rc = -EIO;
6477 goto err_out_unmap;
6478 }
6479 bp->flags |= PCIE_FLAG;
6480 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006481 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6482 if (bp->pcix_cap == 0) {
6483 dev_err(&pdev->dev,
6484 "Cannot find PCIX capability, aborting.\n");
6485 rc = -EIO;
6486 goto err_out_unmap;
6487 }
6488 }
6489
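	/* MSI is not used on the 5706 A0/A1 steppings, so only record the
	 * MSI capability on later chips.
	 */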
Michael Chan8e6a72c2007-05-03 13:24:48 -07006490 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6491 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6492 bp->flags |= MSI_CAP_FLAG;
6493 }
6494
Michael Chan40453c82007-05-03 13:19:18 -07006495 /* 5708 cannot support DMA addresses > 40-bit. */
6496 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6497 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6498 else
6499 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6500
6501 /* Configure DMA attributes. */
6502 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6503 dev->features |= NETIF_F_HIGHDMA;
6504 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6505 if (rc) {
6506 dev_err(&pdev->dev,
6507 "pci_set_consistent_dma_mask failed, aborting.\n");
6508 goto err_out_unmap;
6509 }
6510 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6511 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6512 goto err_out_unmap;
6513 }
6514
Michael Chan883e5152007-05-03 13:25:11 -07006515 if (!(bp->flags & PCIE_FLAG))
6516 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006517
6518 /* 5706A0 may falsely detect SERR and PERR. */
6519 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6520 reg = REG_RD(bp, PCI_COMMAND);
6521 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6522 REG_WR(bp, PCI_COMMAND, reg);
6523 }
6524 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6525 !(bp->flags & PCIX_FLAG)) {
6526
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006527 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006528 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
Michael Chanb6016b72005-05-26 13:03:09 -07006529		goto err_out_unmap;
6530 }
6531
6532 bnx2_init_nvram(bp);
6533
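	/* Locate the firmware shared memory window.  Newer bootcode publishes
	 * a per-function base address behind a signature; otherwise fall back
	 * to the fixed host-view address.
	 */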
Michael Chane3648b32005-11-04 08:51:21 -08006534 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6535
6536 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006537 BNX2_SHM_HDR_SIGNATURE_SIG) {
6538 u32 off = PCI_FUNC(pdev->devfn) << 2;
6539
6540 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6541 } else
Michael Chane3648b32005-11-04 08:51:21 -08006542 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6543
Michael Chanb6016b72005-05-26 13:03:09 -07006544 /* Get the permanent MAC address. First we need to make sure the
6545 * firmware is actually running.
6546 */
Michael Chane3648b32005-11-04 08:51:21 -08006547 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006548
6549 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6550 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006551 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006552 rc = -ENODEV;
6553 goto err_out_unmap;
6554 }
6555
Michael Chane3648b32005-11-04 08:51:21 -08006556 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006557
Michael Chane3648b32005-11-04 08:51:21 -08006558 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006559 bp->mac_addr[0] = (u8) (reg >> 8);
6560 bp->mac_addr[1] = (u8) reg;
6561
Michael Chane3648b32005-11-04 08:51:21 -08006562 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006563 bp->mac_addr[2] = (u8) (reg >> 24);
6564 bp->mac_addr[3] = (u8) (reg >> 16);
6565 bp->mac_addr[4] = (u8) (reg >> 8);
6566 bp->mac_addr[5] = (u8) reg;
6567
6568 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006569 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006570
6571 bp->rx_csum = 1;
6572
6573 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6574
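	/* Default interrupt coalescing parameters. */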
6575 bp->tx_quick_cons_trip_int = 20;
6576 bp->tx_quick_cons_trip = 20;
6577 bp->tx_ticks_int = 80;
6578 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006579
Michael Chanb6016b72005-05-26 13:03:09 -07006580 bp->rx_quick_cons_trip_int = 6;
6581 bp->rx_quick_cons_trip = 6;
6582 bp->rx_ticks_int = 18;
6583 bp->rx_ticks = 18;
6584
6585 bp->stats_ticks = 1000000 & 0xffff00;
6586
6587 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006588 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006589
Michael Chan5b0c76a2005-11-04 08:45:49 -08006590 bp->phy_addr = 1;
6591
Michael Chanb6016b72005-05-26 13:03:09 -07006592	/* Determine the PHY/media type.  WOL is not supported on SERDES chips. */
Michael Chan253c8b72007-01-08 19:56:01 -08006593 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6594 bnx2_get_5709_media(bp);
6595 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006596 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006597
Michael Chan0d8a6572007-07-07 22:49:43 -07006598 bp->phy_port = PORT_TP;
Michael Chanbac0dff2006-11-19 14:15:05 -08006599 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chan0d8a6572007-07-07 22:49:43 -07006600 bp->phy_port = PORT_FIBRE;
Michael Chanb6016b72005-05-26 13:03:09 -07006601 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006602 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006603 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006604 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006605 BNX2_SHARED_HW_CFG_CONFIG);
6606 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6607 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6608 }
Michael Chan0d8a6572007-07-07 22:49:43 -07006609 bnx2_init_remote_phy(bp);
6610
Michael Chan261dd5c2007-01-08 19:55:46 -08006611 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6612 CHIP_NUM(bp) == CHIP_NUM_5708)
6613 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006614 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6615 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006616
Michael Chan16088272006-06-12 22:16:43 -07006617 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6618 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6619 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006620 bp->flags |= NO_WOL_FLAG;
6621
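	/* 5706 A0: use the same coalescing values for the _int variants. */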
Michael Chanb6016b72005-05-26 13:03:09 -07006622 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6623 bp->tx_quick_cons_trip_int =
6624 bp->tx_quick_cons_trip;
6625 bp->tx_ticks_int = bp->tx_ticks;
6626 bp->rx_quick_cons_trip_int =
6627 bp->rx_quick_cons_trip;
6628 bp->rx_ticks_int = bp->rx_ticks;
6629 bp->comp_prod_trip_int = bp->comp_prod_trip;
6630 bp->com_ticks_int = bp->com_ticks;
6631 bp->cmd_ticks_int = bp->cmd_ticks;
6632 }
6633
Michael Chanf9317a42006-09-29 17:06:23 -07006634 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6635 *
 6636	 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
 6637	 * with byte enables disabled on the unused 32-bit word. This is legal
 6638	 * but causes problems on the AMD 8132, which will eventually stop
 6639	 * responding.
6640 *
6641 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006642 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006643 */
6644 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6645 struct pci_dev *amd_8132 = NULL;
6646
6647 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6648 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6649 amd_8132))) {
6650 u8 rev;
6651
6652 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6653 if (rev >= 0x10 && rev <= 0x13) {
6654 disable_msi = 1;
6655 pci_dev_put(amd_8132);
6656 break;
6657 }
6658 }
6659 }
6660
Michael Chandeaf3912007-07-07 22:48:00 -07006661 bnx2_set_default_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006662 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6663
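	/* Initialize the periodic maintenance timer; it is not armed here. */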
Michael Chancd339a02005-08-25 15:35:24 -07006664 init_timer(&bp->timer);
6665 bp->timer.expires = RUN_AT(bp->timer_interval);
6666 bp->timer.data = (unsigned long) bp;
6667 bp->timer.function = bnx2_timer;
6668
Michael Chanb6016b72005-05-26 13:03:09 -07006669 return 0;
6670
6671err_out_unmap:
6672 if (bp->regview) {
6673 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006674 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006675 }
6676
6677err_out_release:
6678 pci_release_regions(pdev);
6679
6680err_out_disable:
6681 pci_disable_device(pdev);
6682 pci_set_drvdata(pdev, NULL);
6683
6684err_out:
6685 return rc;
6686}
6687
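/* Build a human-readable bus description for the probe banner, e.g.
 * "PCI Express" or (illustrative) "PCI-X 64-bit 133MHz".
 */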
Michael Chan883e5152007-05-03 13:25:11 -07006688static char * __devinit
6689bnx2_bus_string(struct bnx2 *bp, char *str)
6690{
6691 char *s = str;
6692
6693 if (bp->flags & PCIE_FLAG) {
6694 s += sprintf(s, "PCI Express");
6695 } else {
6696 s += sprintf(s, "PCI");
6697 if (bp->flags & PCIX_FLAG)
6698 s += sprintf(s, "-X");
6699 if (bp->flags & PCI_32BIT_FLAG)
6700 s += sprintf(s, " 32-bit");
6701 else
6702 s += sprintf(s, " 64-bit");
6703 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6704 }
6705 return str;
6706}
6707
Michael Chanb6016b72005-05-26 13:03:09 -07006708static int __devinit
6709bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6710{
6711 static int version_printed = 0;
6712 struct net_device *dev = NULL;
6713 struct bnx2 *bp;
6714 int rc, i;
Michael Chan883e5152007-05-03 13:25:11 -07006715 char str[40];
Michael Chanb6016b72005-05-26 13:03:09 -07006716
6717 if (version_printed++ == 0)
6718 printk(KERN_INFO "%s", version);
6719
6720 /* dev zeroed in init_etherdev */
6721 dev = alloc_etherdev(sizeof(*bp));
6722
6723 if (!dev)
6724 return -ENOMEM;
6725
6726 rc = bnx2_init_board(pdev, dev);
6727 if (rc < 0) {
6728 free_netdev(dev);
6729 return rc;
6730 }
6731
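	/* Wire up net_device entry points before registration. */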
6732 dev->open = bnx2_open;
6733 dev->hard_start_xmit = bnx2_start_xmit;
6734 dev->stop = bnx2_close;
6735 dev->get_stats = bnx2_get_stats;
6736 dev->set_multicast_list = bnx2_set_rx_mode;
6737 dev->do_ioctl = bnx2_ioctl;
6738 dev->set_mac_address = bnx2_change_mac_addr;
6739 dev->change_mtu = bnx2_change_mtu;
6740 dev->tx_timeout = bnx2_tx_timeout;
6741 dev->watchdog_timeo = TX_TIMEOUT;
6742#ifdef BCM_VLAN
6743 dev->vlan_rx_register = bnx2_vlan_rx_register;
Michael Chanb6016b72005-05-26 13:03:09 -07006744#endif
6745 dev->poll = bnx2_poll;
6746 dev->ethtool_ops = &bnx2_ethtool_ops;
6747 dev->weight = 64;
6748
Michael Chan972ec0d2006-01-23 16:12:43 -08006749 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006750
6751#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6752 dev->poll_controller = poll_bnx2;
6753#endif
6754
Michael Chan1b2f9222007-05-03 13:20:19 -07006755 pci_set_drvdata(pdev, dev);
6756
6757 memcpy(dev->dev_addr, bp->mac_addr, 6);
6758 memcpy(dev->perm_addr, bp->mac_addr, 6);
6759 bp->name = board_info[ent->driver_data].name;
6760
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006761 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Michael Chan4666f872007-05-03 13:22:28 -07006762 if (CHIP_NUM(bp) == CHIP_NUM_5709)
Stephen Hemmingerd212f872007-06-27 00:47:37 -07006763 dev->features |= NETIF_F_IPV6_CSUM;
6764
Michael Chan1b2f9222007-05-03 13:20:19 -07006765#ifdef BCM_VLAN
6766 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6767#endif
6768 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07006769 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6770 dev->features |= NETIF_F_TSO6;
Michael Chan1b2f9222007-05-03 13:20:19 -07006771
Michael Chanb6016b72005-05-26 13:03:09 -07006772 if ((rc = register_netdev(dev))) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006773 dev_err(&pdev->dev, "Cannot register net device\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006774 if (bp->regview)
6775 iounmap(bp->regview);
6776 pci_release_regions(pdev);
6777 pci_disable_device(pdev);
6778 pci_set_drvdata(pdev, NULL);
6779 free_netdev(dev);
6780 return rc;
6781 }
6782
Michael Chan883e5152007-05-03 13:25:11 -07006783 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
Michael Chanb6016b72005-05-26 13:03:09 -07006784 "IRQ %d, ",
6785 dev->name,
6786 bp->name,
6787 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6788 ((CHIP_ID(bp) & 0x0ff0) >> 4),
Michael Chan883e5152007-05-03 13:25:11 -07006789 bnx2_bus_string(bp, str),
Michael Chanb6016b72005-05-26 13:03:09 -07006790 dev->base_addr,
6791 bp->pdev->irq);
6792
6793 printk("node addr ");
6794 for (i = 0; i < 6; i++)
6795 printk("%2.2x", dev->dev_addr[i]);
6796 printk("\n");
6797
Michael Chanb6016b72005-05-26 13:03:09 -07006798 return 0;
6799}
6800
6801static void __devexit
6802bnx2_remove_one(struct pci_dev *pdev)
6803{
6804 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006805 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006806
Michael Chanafdc08b2005-08-25 15:34:29 -07006807 flush_scheduled_work();
6808
Michael Chanb6016b72005-05-26 13:03:09 -07006809 unregister_netdev(dev);
6810
6811 if (bp->regview)
6812 iounmap(bp->regview);
6813
6814 free_netdev(dev);
6815 pci_release_regions(pdev);
6816 pci_disable_device(pdev);
6817 pci_set_drvdata(pdev, NULL);
6818}
6819
6820static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07006821bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07006822{
6823 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006824 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006825 u32 reset_code;
6826
6827 if (!netif_running(dev))
6828 return 0;
6829
Michael Chan1d60290f2006-03-20 17:50:08 -08006830 flush_scheduled_work();
Michael Chanb6016b72005-05-26 13:03:09 -07006831 bnx2_netif_stop(bp);
6832 netif_device_detach(dev);
6833 del_timer_sync(&bp->timer);
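	/* Tell the firmware, via the reset code, whether WOL should be armed. */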
Michael Chandda1e392006-01-23 16:08:14 -08006834 if (bp->flags & NO_WOL_FLAG)
Michael Chan6c4f0952006-06-29 12:38:15 -07006835 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
Michael Chandda1e392006-01-23 16:08:14 -08006836 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07006837 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6838 else
6839 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6840 bnx2_reset_chip(bp, reset_code);
6841 bnx2_free_skbs(bp);
Michael Chan30c517b2007-05-03 13:20:40 -07006842 pci_save_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006843 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07006844 return 0;
6845}
6846
6847static int
6848bnx2_resume(struct pci_dev *pdev)
6849{
6850 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006851 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006852
6853 if (!netif_running(dev))
6854 return 0;
6855
Michael Chan30c517b2007-05-03 13:20:40 -07006856 pci_restore_state(pdev);
Pavel Machek829ca9a2005-09-03 15:56:56 -07006857 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006858 netif_device_attach(dev);
6859 bnx2_init_nic(bp);
6860 bnx2_netif_start(bp);
6861 return 0;
6862}
6863
6864static struct pci_driver bnx2_pci_driver = {
Peter Hagervall14ab9b82005-08-10 14:18:16 -07006865 .name = DRV_MODULE_NAME,
6866 .id_table = bnx2_pci_tbl,
6867 .probe = bnx2_init_one,
6868 .remove = __devexit_p(bnx2_remove_one),
6869 .suspend = bnx2_suspend,
6870 .resume = bnx2_resume,
Michael Chanb6016b72005-05-26 13:03:09 -07006871};
6872
6873static int __init bnx2_init(void)
6874{
Jeff Garzik29917622006-08-19 17:48:59 -04006875 return pci_register_driver(&bnx2_pci_driver);
Michael Chanb6016b72005-05-26 13:03:09 -07006876}
6877
6878static void __exit bnx2_cleanup(void)
6879{
6880 pci_unregister_driver(&bnx2_pci_driver);
6881}
6882
6883module_init(bnx2_init);
6884module_exit(bnx2_cleanup);
6885
6886
6887