blob: d75c96de96e4feb51c5b9a9026a1ab2cf8b29fab [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
39
40#include <net/checksum.h>
41
42#include <asm/system.h>
43#include <asm/io.h>
44#include <asm/byteorder.h>
45#include <asm/uaccess.h>
46
47#ifdef CONFIG_SPARC64
48#include <asm/idprom.h>
49#include <asm/oplib.h>
50#include <asm/pbm.h>
51#endif
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define TG3_VLAN_TAG_USED 1
55#else
56#define TG3_VLAN_TAG_USED 0
57#endif
58
59#ifdef NETIF_F_TSO
60#define TG3_TSO_SUPPORT 1
61#else
62#define TG3_TSO_SUPPORT 0
63#endif
64
65#include "tg3.h"
66
67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": "
David S. Miller7d3f4c92005-08-06 06:35:48 -070069#define DRV_MODULE_VERSION "3.35"
70#define DRV_MODULE_RELDATE "August 6, 2005"
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0
74#define TG3_DEF_TX_MODE 0
75#define TG3_DEF_MSG_ENABLE \
76 (NETIF_MSG_DRV | \
77 NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | \
79 NETIF_MSG_TIMER | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
87 */
88#define TG3_TX_TIMEOUT (5 * HZ)
89
90/* hardware minimum and maximum for a single frame's data payload */
91#define TG3_MIN_MTU 60
92#define TG3_MAX_MTU(tp) \
Michael Chan0f893dc2005-07-25 12:30:38 -070093 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -070094
95/* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
98 */
99#define TG3_RX_RING_SIZE 512
100#define TG3_DEF_RX_RING_PENDING 200
101#define TG3_RX_JUMBO_RING_SIZE 256
102#define TG3_DEF_RX_JUMBO_RING_PENDING 100
103
104/* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
109 */
110#define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
112
113#define TG3_TX_RING_SIZE 512
114#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
115
116#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_RING_SIZE)
118#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 TG3_TX_RING_SIZE)
124#define TX_RING_GAP(TP) \
125 (TG3_TX_RING_SIZE - (TP)->tx_pending)
126#define TX_BUFFS_AVAIL(TP) \
127 (((TP)->tx_cons <= (TP)->tx_prod) ? \
128 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
129 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
Michael Chan4cafd3f2005-05-29 14:56:34 -0700141#define TG3_NUM_TEST 6
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
John W. Linville6e9017a2005-04-21 16:58:56 -0700214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
John W. Linvilleaf2bcd92005-04-21 16:57:50 -0700215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Xose Vazquez Perezd8659252005-05-23 12:54:51 -0700216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Michael Chan4cf78e42005-07-25 12:29:19 -0700224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { 0, }
245};
246
247MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249static struct {
250 const char string[ETH_GSTRING_LEN];
251} ethtool_stats_keys[TG3_NUM_STATS] = {
252 { "rx_octets" },
253 { "rx_fragments" },
254 { "rx_ucast_packets" },
255 { "rx_mcast_packets" },
256 { "rx_bcast_packets" },
257 { "rx_fcs_errors" },
258 { "rx_align_errors" },
259 { "rx_xon_pause_rcvd" },
260 { "rx_xoff_pause_rcvd" },
261 { "rx_mac_ctrl_rcvd" },
262 { "rx_xoff_entered" },
263 { "rx_frame_too_long_errors" },
264 { "rx_jabbers" },
265 { "rx_undersize_packets" },
266 { "rx_in_length_errors" },
267 { "rx_out_length_errors" },
268 { "rx_64_or_less_octet_packets" },
269 { "rx_65_to_127_octet_packets" },
270 { "rx_128_to_255_octet_packets" },
271 { "rx_256_to_511_octet_packets" },
272 { "rx_512_to_1023_octet_packets" },
273 { "rx_1024_to_1522_octet_packets" },
274 { "rx_1523_to_2047_octet_packets" },
275 { "rx_2048_to_4095_octet_packets" },
276 { "rx_4096_to_8191_octet_packets" },
277 { "rx_8192_to_9022_octet_packets" },
278
279 { "tx_octets" },
280 { "tx_collisions" },
281
282 { "tx_xon_sent" },
283 { "tx_xoff_sent" },
284 { "tx_flow_control" },
285 { "tx_mac_errors" },
286 { "tx_single_collisions" },
287 { "tx_mult_collisions" },
288 { "tx_deferred" },
289 { "tx_excessive_collisions" },
290 { "tx_late_collisions" },
291 { "tx_collide_2times" },
292 { "tx_collide_3times" },
293 { "tx_collide_4times" },
294 { "tx_collide_5times" },
295 { "tx_collide_6times" },
296 { "tx_collide_7times" },
297 { "tx_collide_8times" },
298 { "tx_collide_9times" },
299 { "tx_collide_10times" },
300 { "tx_collide_11times" },
301 { "tx_collide_12times" },
302 { "tx_collide_13times" },
303 { "tx_collide_14times" },
304 { "tx_collide_15times" },
305 { "tx_ucast_packets" },
306 { "tx_mcast_packets" },
307 { "tx_bcast_packets" },
308 { "tx_carrier_sense_errors" },
309 { "tx_discards" },
310 { "tx_errors" },
311
312 { "dma_writeq_full" },
313 { "dma_write_prioq_full" },
314 { "rxbds_empty" },
315 { "rx_discards" },
316 { "rx_errors" },
317 { "rx_threshold_hit" },
318
319 { "dma_readq_full" },
320 { "dma_read_prioq_full" },
321 { "tx_comp_queue_full" },
322
323 { "ring_set_send_prod_index" },
324 { "ring_status_update" },
325 { "nic_irqs" },
326 { "nic_avoided_irqs" },
327 { "nic_tx_threshold_hit" }
328};
329
Michael Chan4cafd3f2005-05-29 14:56:34 -0700330static struct {
331 const char string[ETH_GSTRING_LEN];
332} ethtool_test_keys[TG3_NUM_TEST] = {
333 { "nvram test (online) " },
334 { "link test (online) " },
335 { "register test (offline)" },
336 { "memory test (offline)" },
337 { "loopback test (offline)" },
338 { "interrupt test (offline)" },
339};
340
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342{
343 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
David S. Millerf47c11e2005-06-24 20:18:35 -0700344 spin_lock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
David S. Millerf47c11e2005-06-24 20:18:35 -0700347 spin_unlock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 } else {
349 writel(val, tp->regs + off);
350 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351 readl(tp->regs + off);
352 }
353}
354
355static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356{
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
David S. Millerf47c11e2005-06-24 20:18:35 -0700358 spin_lock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
David S. Millerf47c11e2005-06-24 20:18:35 -0700361 spin_unlock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367}
368
369static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370{
371 void __iomem *mbox = tp->regs + off;
372 writel(val, mbox);
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374 readl(mbox);
375}
376
377static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378{
379 void __iomem *mbox = tp->regs + off;
380 writel(val, mbox);
381 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382 writel(val, mbox);
383 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384 readl(mbox);
385}
386
387#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
388#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
389#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
390
391#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
392#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
394#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395#define tr32(reg) readl(tp->regs + (reg))
396#define tr16(reg) readw(tp->regs + (reg))
397#define tr8(reg) readb(tp->regs + (reg))
398
399static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400{
David S. Millerf47c11e2005-06-24 20:18:35 -0700401 spin_lock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404
405 /* Always leave this as zero. */
406 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -0700407 spin_unlock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408}
409
410static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411{
David S. Millerf47c11e2005-06-24 20:18:35 -0700412 spin_lock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415
416 /* Always leave this as zero. */
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -0700418 spin_unlock_bh(&tp->indirect_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419}
420
421static void tg3_disable_ints(struct tg3 *tp)
422{
423 tw32(TG3PCI_MISC_HOST_CTRL,
424 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427}
428
429static inline void tg3_cond_int(struct tg3 *tp)
430{
431 if (tp->hw_status->status & SD_STATUS_UPDATED)
432 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
433}
434
435static void tg3_enable_ints(struct tg3 *tp)
436{
Michael Chanbbe832c2005-06-24 20:20:04 -0700437 tp->irq_sync = 0;
438 wmb();
439
Linus Torvalds1da177e2005-04-16 15:20:36 -0700440 tw32(TG3PCI_MISC_HOST_CTRL,
441 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
David S. Millerfac9b832005-05-18 22:46:34 -0700442 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443 (tp->last_tag << 24));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445 tg3_cond_int(tp);
446}
447
Michael Chan04237dd2005-04-25 15:17:17 -0700448static inline unsigned int tg3_has_work(struct tg3 *tp)
449{
450 struct tg3_hw_status *sblk = tp->hw_status;
451 unsigned int work_exists = 0;
452
453 /* check for phy events */
454 if (!(tp->tg3_flags &
455 (TG3_FLAG_USE_LINKCHG_REG |
456 TG3_FLAG_POLL_SERDES))) {
457 if (sblk->status & SD_STATUS_LINK_CHG)
458 work_exists = 1;
459 }
460 /* check for RX/TX work to do */
461 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
462 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
463 work_exists = 1;
464
465 return work_exists;
466}
467
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468/* tg3_restart_ints
Michael Chan04237dd2005-04-25 15:17:17 -0700469 * similar to tg3_enable_ints, but it accurately determines whether there
470 * is new work pending and can return without flushing the PIO write
471 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472 */
473static void tg3_restart_ints(struct tg3 *tp)
474{
475 tw32(TG3PCI_MISC_HOST_CTRL,
476 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
David S. Millerfac9b832005-05-18 22:46:34 -0700477 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 mmiowb();
480
David S. Millerfac9b832005-05-18 22:46:34 -0700481 /* When doing tagged status, this work check is unnecessary.
482 * The last_tag we write above tells the chip which piece of
483 * work we've completed.
484 */
485 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
486 tg3_has_work(tp))
Michael Chan04237dd2005-04-25 15:17:17 -0700487 tw32(HOSTCC_MODE, tp->coalesce_mode |
488 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489}
490
491static inline void tg3_netif_stop(struct tg3 *tp)
492{
Michael Chanbbe832c2005-06-24 20:20:04 -0700493 tp->dev->trans_start = jiffies; /* prevent tx timeout */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 netif_poll_disable(tp->dev);
495 netif_tx_disable(tp->dev);
496}
497
498static inline void tg3_netif_start(struct tg3 *tp)
499{
500 netif_wake_queue(tp->dev);
501 /* NOTE: unconditional netif_wake_queue is only appropriate
502 * so long as all callers are assured to have free tx slots
503 * (such as after tg3_init_hw)
504 */
505 netif_poll_enable(tp->dev);
David S. Millerf47c11e2005-06-24 20:18:35 -0700506 tp->hw_status->status |= SD_STATUS_UPDATED;
507 tg3_enable_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508}
509
510static void tg3_switch_clocks(struct tg3 *tp)
511{
512 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
513 u32 orig_clock_ctrl;
514
Michael Chan4cf78e42005-07-25 12:29:19 -0700515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
516 return;
517
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 orig_clock_ctrl = clock_ctrl;
519 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
520 CLOCK_CTRL_CLKRUN_OENABLE |
521 0x1f);
522 tp->pci_clock_ctrl = clock_ctrl;
523
524 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
525 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
526 tw32_f(TG3PCI_CLOCK_CTRL,
527 clock_ctrl | CLOCK_CTRL_625_CORE);
528 udelay(40);
529 }
530 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
531 tw32_f(TG3PCI_CLOCK_CTRL,
532 clock_ctrl |
533 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
534 udelay(40);
535 tw32_f(TG3PCI_CLOCK_CTRL,
536 clock_ctrl | (CLOCK_CTRL_ALTCLK));
537 udelay(40);
538 }
539 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
540 udelay(40);
541}
542
543#define PHY_BUSY_LOOPS 5000
544
545static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
546{
547 u32 frame_val;
548 unsigned int loops;
549 int ret;
550
551 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
552 tw32_f(MAC_MI_MODE,
553 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
554 udelay(80);
555 }
556
557 *val = 0x0;
558
559 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
560 MI_COM_PHY_ADDR_MASK);
561 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
562 MI_COM_REG_ADDR_MASK);
563 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
564
565 tw32_f(MAC_MI_COM, frame_val);
566
567 loops = PHY_BUSY_LOOPS;
568 while (loops != 0) {
569 udelay(10);
570 frame_val = tr32(MAC_MI_COM);
571
572 if ((frame_val & MI_COM_BUSY) == 0) {
573 udelay(5);
574 frame_val = tr32(MAC_MI_COM);
575 break;
576 }
577 loops -= 1;
578 }
579
580 ret = -EBUSY;
581 if (loops != 0) {
582 *val = frame_val & MI_COM_DATA_MASK;
583 ret = 0;
584 }
585
586 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
587 tw32_f(MAC_MI_MODE, tp->mi_mode);
588 udelay(80);
589 }
590
591 return ret;
592}
593
594static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
595{
596 u32 frame_val;
597 unsigned int loops;
598 int ret;
599
600 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
601 tw32_f(MAC_MI_MODE,
602 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
603 udelay(80);
604 }
605
606 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
607 MI_COM_PHY_ADDR_MASK);
608 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
609 MI_COM_REG_ADDR_MASK);
610 frame_val |= (val & MI_COM_DATA_MASK);
611 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
612
613 tw32_f(MAC_MI_COM, frame_val);
614
615 loops = PHY_BUSY_LOOPS;
616 while (loops != 0) {
617 udelay(10);
618 frame_val = tr32(MAC_MI_COM);
619 if ((frame_val & MI_COM_BUSY) == 0) {
620 udelay(5);
621 frame_val = tr32(MAC_MI_COM);
622 break;
623 }
624 loops -= 1;
625 }
626
627 ret = -EBUSY;
628 if (loops != 0)
629 ret = 0;
630
631 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
632 tw32_f(MAC_MI_MODE, tp->mi_mode);
633 udelay(80);
634 }
635
636 return ret;
637}
638
639static void tg3_phy_set_wirespeed(struct tg3 *tp)
640{
641 u32 val;
642
643 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
644 return;
645
646 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
647 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
648 tg3_writephy(tp, MII_TG3_AUX_CTRL,
649 (val | (1 << 15) | (1 << 4)));
650}
651
652static int tg3_bmcr_reset(struct tg3 *tp)
653{
654 u32 phy_control;
655 int limit, err;
656
657 /* OK, reset it, and poll the BMCR_RESET bit until it
658 * clears or we time out.
659 */
660 phy_control = BMCR_RESET;
661 err = tg3_writephy(tp, MII_BMCR, phy_control);
662 if (err != 0)
663 return -EBUSY;
664
665 limit = 5000;
666 while (limit--) {
667 err = tg3_readphy(tp, MII_BMCR, &phy_control);
668 if (err != 0)
669 return -EBUSY;
670
671 if ((phy_control & BMCR_RESET) == 0) {
672 udelay(40);
673 break;
674 }
675 udelay(10);
676 }
677 if (limit <= 0)
678 return -EBUSY;
679
680 return 0;
681}
682
683static int tg3_wait_macro_done(struct tg3 *tp)
684{
685 int limit = 100;
686
687 while (limit--) {
688 u32 tmp32;
689
690 if (!tg3_readphy(tp, 0x16, &tmp32)) {
691 if ((tmp32 & 0x1000) == 0)
692 break;
693 }
694 }
695 if (limit <= 0)
696 return -EBUSY;
697
698 return 0;
699}
700
701static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
702{
703 static const u32 test_pat[4][6] = {
704 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
705 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
706 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
707 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
708 };
709 int chan;
710
711 for (chan = 0; chan < 4; chan++) {
712 int i;
713
714 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
715 (chan * 0x2000) | 0x0200);
716 tg3_writephy(tp, 0x16, 0x0002);
717
718 for (i = 0; i < 6; i++)
719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
720 test_pat[chan][i]);
721
722 tg3_writephy(tp, 0x16, 0x0202);
723 if (tg3_wait_macro_done(tp)) {
724 *resetp = 1;
725 return -EBUSY;
726 }
727
728 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
729 (chan * 0x2000) | 0x0200);
730 tg3_writephy(tp, 0x16, 0x0082);
731 if (tg3_wait_macro_done(tp)) {
732 *resetp = 1;
733 return -EBUSY;
734 }
735
736 tg3_writephy(tp, 0x16, 0x0802);
737 if (tg3_wait_macro_done(tp)) {
738 *resetp = 1;
739 return -EBUSY;
740 }
741
742 for (i = 0; i < 6; i += 2) {
743 u32 low, high;
744
745 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
746 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
747 tg3_wait_macro_done(tp)) {
748 *resetp = 1;
749 return -EBUSY;
750 }
751 low &= 0x7fff;
752 high &= 0x000f;
753 if (low != test_pat[chan][i] ||
754 high != test_pat[chan][i+1]) {
755 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
756 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
757 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
758
759 return -EBUSY;
760 }
761 }
762 }
763
764 return 0;
765}
766
767static int tg3_phy_reset_chanpat(struct tg3 *tp)
768{
769 int chan;
770
771 for (chan = 0; chan < 4; chan++) {
772 int i;
773
774 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
775 (chan * 0x2000) | 0x0200);
776 tg3_writephy(tp, 0x16, 0x0002);
777 for (i = 0; i < 6; i++)
778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
779 tg3_writephy(tp, 0x16, 0x0202);
780 if (tg3_wait_macro_done(tp))
781 return -EBUSY;
782 }
783
784 return 0;
785}
786
787static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
788{
789 u32 reg32, phy9_orig;
790 int retries, do_phy_reset, err;
791
792 retries = 10;
793 do_phy_reset = 1;
794 do {
795 if (do_phy_reset) {
796 err = tg3_bmcr_reset(tp);
797 if (err)
798 return err;
799 do_phy_reset = 0;
800 }
801
802 /* Disable transmitter and interrupt. */
803 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
804 continue;
805
806 reg32 |= 0x3000;
807 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
808
809 /* Set full-duplex, 1000 mbps. */
810 tg3_writephy(tp, MII_BMCR,
811 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
812
813 /* Set to master mode. */
814 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
815 continue;
816
817 tg3_writephy(tp, MII_TG3_CTRL,
818 (MII_TG3_CTRL_AS_MASTER |
819 MII_TG3_CTRL_ENABLE_AS_MASTER));
820
821 /* Enable SM_DSP_CLOCK and 6dB. */
822 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
823
824 /* Block the PHY control access. */
825 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
826 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
827
828 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
829 if (!err)
830 break;
831 } while (--retries);
832
833 err = tg3_phy_reset_chanpat(tp);
834 if (err)
835 return err;
836
837 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
838 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
839
840 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
841 tg3_writephy(tp, 0x16, 0x0000);
842
843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
845 /* Set Extended packet length bit for jumbo frames */
846 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
847 }
848 else {
849 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
850 }
851
852 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
853
854 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
855 reg32 &= ~0x3000;
856 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
857 } else if (!err)
858 err = -EBUSY;
859
860 return err;
861}
862
863/* This will reset the tigon3 PHY if there is no valid
864 * link unless the FORCE argument is non-zero.
865 */
866static int tg3_phy_reset(struct tg3 *tp)
867{
868 u32 phy_status;
869 int err;
870
871 err = tg3_readphy(tp, MII_BMSR, &phy_status);
872 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
873 if (err != 0)
874 return -EBUSY;
875
876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
879 err = tg3_phy_reset_5703_4_5(tp);
880 if (err)
881 return err;
882 goto out;
883 }
884
885 err = tg3_bmcr_reset(tp);
886 if (err)
887 return err;
888
889out:
890 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
891 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
892 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
893 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
894 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
895 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
896 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
897 }
898 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
899 tg3_writephy(tp, 0x1c, 0x8d68);
900 tg3_writephy(tp, 0x1c, 0x8d68);
901 }
902 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
903 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
904 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
905 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
906 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
907 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
908 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
909 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
910 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
911 }
912 /* Set Extended packet length bit (bit 14) on all chips that */
913 /* support jumbo frames */
914 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
915 /* Cannot do read-modify-write on 5401 */
916 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -0700917 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 u32 phy_reg;
919
920 /* Set bit 14 with read-modify-write to preserve other bits */
921 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
922 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
923 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
924 }
925
926 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
927 * jumbo frames transmission.
928 */
Michael Chan0f893dc2005-07-25 12:30:38 -0700929 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 u32 phy_reg;
931
932 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
933 tg3_writephy(tp, MII_TG3_EXT_CTRL,
934 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
935 }
936
937 tg3_phy_set_wirespeed(tp);
938 return 0;
939}
940
941static void tg3_frob_aux_power(struct tg3 *tp)
942{
943 struct tg3 *tp_peer = tp;
944
945 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
946 return;
947
948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
949 tp_peer = pci_get_drvdata(tp->pdev_peer);
950 if (!tp_peer)
951 BUG();
952 }
953
954
955 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
956 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
959 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960 (GRC_LCLCTRL_GPIO_OE0 |
961 GRC_LCLCTRL_GPIO_OE1 |
962 GRC_LCLCTRL_GPIO_OE2 |
963 GRC_LCLCTRL_GPIO_OUTPUT0 |
964 GRC_LCLCTRL_GPIO_OUTPUT1));
965 udelay(100);
966 } else {
967 u32 no_gpio2;
968 u32 grc_local_ctrl;
969
970 if (tp_peer != tp &&
971 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
972 return;
973
974 /* On 5753 and variants, GPIO2 cannot be used. */
975 no_gpio2 = tp->nic_sram_data_cfg &
976 NIC_SRAM_DATA_CFG_NO_GPIO2;
977
978 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
979 GRC_LCLCTRL_GPIO_OE1 |
980 GRC_LCLCTRL_GPIO_OE2 |
981 GRC_LCLCTRL_GPIO_OUTPUT1 |
982 GRC_LCLCTRL_GPIO_OUTPUT2;
983 if (no_gpio2) {
984 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
985 GRC_LCLCTRL_GPIO_OUTPUT2);
986 }
987 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
988 grc_local_ctrl);
989 udelay(100);
990
991 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
992
993 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
994 grc_local_ctrl);
995 udelay(100);
996
997 if (!no_gpio2) {
998 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
999 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000 grc_local_ctrl);
1001 udelay(100);
1002 }
1003 }
1004 } else {
1005 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1006 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1007 if (tp_peer != tp &&
1008 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1009 return;
1010
1011 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1012 (GRC_LCLCTRL_GPIO_OE1 |
1013 GRC_LCLCTRL_GPIO_OUTPUT1));
1014 udelay(100);
1015
1016 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1017 (GRC_LCLCTRL_GPIO_OE1));
1018 udelay(100);
1019
1020 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021 (GRC_LCLCTRL_GPIO_OE1 |
1022 GRC_LCLCTRL_GPIO_OUTPUT1));
1023 udelay(100);
1024 }
1025 }
1026}
1027
1028static int tg3_setup_phy(struct tg3 *, int);
1029
1030#define RESET_KIND_SHUTDOWN 0
1031#define RESET_KIND_INIT 1
1032#define RESET_KIND_SUSPEND 2
1033
1034static void tg3_write_sig_post_reset(struct tg3 *, int);
1035static int tg3_halt_cpu(struct tg3 *, u32);
1036
1037static int tg3_set_power_state(struct tg3 *tp, int state)
1038{
1039 u32 misc_host_ctrl;
1040 u16 power_control, power_caps;
1041 int pm = tp->pm_cap;
1042
1043 /* Make sure register accesses (indirect or otherwise)
1044 * will function correctly.
1045 */
1046 pci_write_config_dword(tp->pdev,
1047 TG3PCI_MISC_HOST_CTRL,
1048 tp->misc_host_ctrl);
1049
1050 pci_read_config_word(tp->pdev,
1051 pm + PCI_PM_CTRL,
1052 &power_control);
1053 power_control |= PCI_PM_CTRL_PME_STATUS;
1054 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1055 switch (state) {
1056 case 0:
1057 power_control |= 0;
1058 pci_write_config_word(tp->pdev,
1059 pm + PCI_PM_CTRL,
1060 power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001061 udelay(100); /* Delay after power state change */
1062
1063 /* Switch out of Vaux if it is not a LOM */
1064 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1065 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1066 udelay(100);
1067 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068
1069 return 0;
1070
1071 case 1:
1072 power_control |= 1;
1073 break;
1074
1075 case 2:
1076 power_control |= 2;
1077 break;
1078
1079 case 3:
1080 power_control |= 3;
1081 break;
1082
1083 default:
1084 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1085 "requested.\n",
1086 tp->dev->name, state);
1087 return -EINVAL;
1088 };
1089
1090 power_control |= PCI_PM_CTRL_PME_ENABLE;
1091
1092 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1093 tw32(TG3PCI_MISC_HOST_CTRL,
1094 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1095
1096 if (tp->link_config.phy_is_low_power == 0) {
1097 tp->link_config.phy_is_low_power = 1;
1098 tp->link_config.orig_speed = tp->link_config.speed;
1099 tp->link_config.orig_duplex = tp->link_config.duplex;
1100 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1101 }
1102
Michael Chan747e8f82005-07-25 12:33:22 -07001103 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 tp->link_config.speed = SPEED_10;
1105 tp->link_config.duplex = DUPLEX_HALF;
1106 tp->link_config.autoneg = AUTONEG_ENABLE;
1107 tg3_setup_phy(tp, 0);
1108 }
1109
1110 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1111
1112 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1113 u32 mac_mode;
1114
1115 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1116 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1117 udelay(40);
1118
1119 mac_mode = MAC_MODE_PORT_MODE_MII;
1120
1121 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1122 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1123 mac_mode |= MAC_MODE_LINK_POLARITY;
1124 } else {
1125 mac_mode = MAC_MODE_PORT_MODE_TBI;
1126 }
1127
John W. Linvillecbf46852005-04-21 17:01:29 -07001128 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 tw32(MAC_LED_CTRL, tp->led_ctrl);
1130
1131 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1132 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1133 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1134
1135 tw32_f(MAC_MODE, mac_mode);
1136 udelay(100);
1137
1138 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1139 udelay(10);
1140 }
1141
1142 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1143 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1145 u32 base_val;
1146
1147 base_val = tp->pci_clock_ctrl;
1148 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1149 CLOCK_CTRL_TXCLK_DISABLE);
1150
1151 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1152 CLOCK_CTRL_ALTCLK |
1153 CLOCK_CTRL_PWRDOWN_PLL133);
1154 udelay(40);
Michael Chan4cf78e42005-07-25 12:29:19 -07001155 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1156 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07001157 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1159 u32 newbits1, newbits2;
1160
1161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1164 CLOCK_CTRL_TXCLK_DISABLE |
1165 CLOCK_CTRL_ALTCLK);
1166 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1167 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1168 newbits1 = CLOCK_CTRL_625_CORE;
1169 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1170 } else {
1171 newbits1 = CLOCK_CTRL_ALTCLK;
1172 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1173 }
1174
1175 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1176 udelay(40);
1177
1178 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1179 udelay(40);
1180
1181 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1182 u32 newbits3;
1183
1184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1186 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1187 CLOCK_CTRL_TXCLK_DISABLE |
1188 CLOCK_CTRL_44MHZ_CORE);
1189 } else {
1190 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1191 }
1192
1193 tw32_f(TG3PCI_CLOCK_CTRL,
1194 tp->pci_clock_ctrl | newbits3);
1195 udelay(40);
1196 }
1197 }
1198
1199 tg3_frob_aux_power(tp);
1200
1201 /* Workaround for unstable PLL clock */
1202 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1203 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1204 u32 val = tr32(0x7d00);
1205
1206 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1207 tw32(0x7d00, val);
1208 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1209 tg3_halt_cpu(tp, RX_CPU_BASE);
1210 }
1211
1212 /* Finally, set the new power state. */
1213 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001214 udelay(100); /* Delay after power state change */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1217
1218 return 0;
1219}
1220
1221static void tg3_link_report(struct tg3 *tp)
1222{
1223 if (!netif_carrier_ok(tp->dev)) {
1224 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1225 } else {
1226 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1227 tp->dev->name,
1228 (tp->link_config.active_speed == SPEED_1000 ?
1229 1000 :
1230 (tp->link_config.active_speed == SPEED_100 ?
1231 100 : 10)),
1232 (tp->link_config.active_duplex == DUPLEX_FULL ?
1233 "full" : "half"));
1234
1235 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1236 "%s for RX.\n",
1237 tp->dev->name,
1238 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1239 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1240 }
1241}
1242
1243static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1244{
1245 u32 new_tg3_flags = 0;
1246 u32 old_rx_mode = tp->rx_mode;
1247 u32 old_tx_mode = tp->tx_mode;
1248
1249 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
Michael Chan747e8f82005-07-25 12:33:22 -07001250
1251 /* Convert 1000BaseX flow control bits to 1000BaseT
1252 * bits before resolving flow control.
1253 */
1254 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1255 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1256 ADVERTISE_PAUSE_ASYM);
1257 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1258
1259 if (local_adv & ADVERTISE_1000XPAUSE)
1260 local_adv |= ADVERTISE_PAUSE_CAP;
1261 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1262 local_adv |= ADVERTISE_PAUSE_ASYM;
1263 if (remote_adv & LPA_1000XPAUSE)
1264 remote_adv |= LPA_PAUSE_CAP;
1265 if (remote_adv & LPA_1000XPAUSE_ASYM)
1266 remote_adv |= LPA_PAUSE_ASYM;
1267 }
1268
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 if (local_adv & ADVERTISE_PAUSE_CAP) {
1270 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1271 if (remote_adv & LPA_PAUSE_CAP)
1272 new_tg3_flags |=
1273 (TG3_FLAG_RX_PAUSE |
1274 TG3_FLAG_TX_PAUSE);
1275 else if (remote_adv & LPA_PAUSE_ASYM)
1276 new_tg3_flags |=
1277 (TG3_FLAG_RX_PAUSE);
1278 } else {
1279 if (remote_adv & LPA_PAUSE_CAP)
1280 new_tg3_flags |=
1281 (TG3_FLAG_RX_PAUSE |
1282 TG3_FLAG_TX_PAUSE);
1283 }
1284 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1285 if ((remote_adv & LPA_PAUSE_CAP) &&
1286 (remote_adv & LPA_PAUSE_ASYM))
1287 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1288 }
1289
1290 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1291 tp->tg3_flags |= new_tg3_flags;
1292 } else {
1293 new_tg3_flags = tp->tg3_flags;
1294 }
1295
1296 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1297 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1298 else
1299 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1300
1301 if (old_rx_mode != tp->rx_mode) {
1302 tw32_f(MAC_RX_MODE, tp->rx_mode);
1303 }
1304
1305 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1306 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1307 else
1308 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1309
1310 if (old_tx_mode != tp->tx_mode) {
1311 tw32_f(MAC_TX_MODE, tp->tx_mode);
1312 }
1313}
1314
1315static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1316{
1317 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1318 case MII_TG3_AUX_STAT_10HALF:
1319 *speed = SPEED_10;
1320 *duplex = DUPLEX_HALF;
1321 break;
1322
1323 case MII_TG3_AUX_STAT_10FULL:
1324 *speed = SPEED_10;
1325 *duplex = DUPLEX_FULL;
1326 break;
1327
1328 case MII_TG3_AUX_STAT_100HALF:
1329 *speed = SPEED_100;
1330 *duplex = DUPLEX_HALF;
1331 break;
1332
1333 case MII_TG3_AUX_STAT_100FULL:
1334 *speed = SPEED_100;
1335 *duplex = DUPLEX_FULL;
1336 break;
1337
1338 case MII_TG3_AUX_STAT_1000HALF:
1339 *speed = SPEED_1000;
1340 *duplex = DUPLEX_HALF;
1341 break;
1342
1343 case MII_TG3_AUX_STAT_1000FULL:
1344 *speed = SPEED_1000;
1345 *duplex = DUPLEX_FULL;
1346 break;
1347
1348 default:
1349 *speed = SPEED_INVALID;
1350 *duplex = DUPLEX_INVALID;
1351 break;
1352 };
1353}
1354
1355static void tg3_phy_copper_begin(struct tg3 *tp)
1356{
1357 u32 new_adv;
1358 int i;
1359
1360 if (tp->link_config.phy_is_low_power) {
1361 /* Entering low power mode. Disable gigabit and
1362 * 100baseT advertisements.
1363 */
1364 tg3_writephy(tp, MII_TG3_CTRL, 0);
1365
1366 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1367 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1368 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1369 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1370
1371 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1372 } else if (tp->link_config.speed == SPEED_INVALID) {
1373 tp->link_config.advertising =
1374 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1375 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1376 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1377 ADVERTISED_Autoneg | ADVERTISED_MII);
1378
1379 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1380 tp->link_config.advertising &=
1381 ~(ADVERTISED_1000baseT_Half |
1382 ADVERTISED_1000baseT_Full);
1383
1384 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1385 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1386 new_adv |= ADVERTISE_10HALF;
1387 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1388 new_adv |= ADVERTISE_10FULL;
1389 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1390 new_adv |= ADVERTISE_100HALF;
1391 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1392 new_adv |= ADVERTISE_100FULL;
1393 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1394
1395 if (tp->link_config.advertising &
1396 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1397 new_adv = 0;
1398 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1399 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1400 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1401 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1402 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1403 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1404 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1405 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1406 MII_TG3_CTRL_ENABLE_AS_MASTER);
1407 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1408 } else {
1409 tg3_writephy(tp, MII_TG3_CTRL, 0);
1410 }
1411 } else {
1412 /* Asking for a specific link mode. */
1413 if (tp->link_config.speed == SPEED_1000) {
1414 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1415 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1416
1417 if (tp->link_config.duplex == DUPLEX_FULL)
1418 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1419 else
1420 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1421 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1422 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1423 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1424 MII_TG3_CTRL_ENABLE_AS_MASTER);
1425 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1426 } else {
1427 tg3_writephy(tp, MII_TG3_CTRL, 0);
1428
1429 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1430 if (tp->link_config.speed == SPEED_100) {
1431 if (tp->link_config.duplex == DUPLEX_FULL)
1432 new_adv |= ADVERTISE_100FULL;
1433 else
1434 new_adv |= ADVERTISE_100HALF;
1435 } else {
1436 if (tp->link_config.duplex == DUPLEX_FULL)
1437 new_adv |= ADVERTISE_10FULL;
1438 else
1439 new_adv |= ADVERTISE_10HALF;
1440 }
1441 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1442 }
1443 }
1444
1445 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1446 tp->link_config.speed != SPEED_INVALID) {
1447 u32 bmcr, orig_bmcr;
1448
1449 tp->link_config.active_speed = tp->link_config.speed;
1450 tp->link_config.active_duplex = tp->link_config.duplex;
1451
1452 bmcr = 0;
1453 switch (tp->link_config.speed) {
1454 default:
1455 case SPEED_10:
1456 break;
1457
1458 case SPEED_100:
1459 bmcr |= BMCR_SPEED100;
1460 break;
1461
1462 case SPEED_1000:
1463 bmcr |= TG3_BMCR_SPEED1000;
1464 break;
1465 };
1466
1467 if (tp->link_config.duplex == DUPLEX_FULL)
1468 bmcr |= BMCR_FULLDPLX;
1469
1470 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1471 (bmcr != orig_bmcr)) {
1472 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1473 for (i = 0; i < 1500; i++) {
1474 u32 tmp;
1475
1476 udelay(10);
1477 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1478 tg3_readphy(tp, MII_BMSR, &tmp))
1479 continue;
1480 if (!(tmp & BMSR_LSTATUS)) {
1481 udelay(40);
1482 break;
1483 }
1484 }
1485 tg3_writephy(tp, MII_BMCR, bmcr);
1486 udelay(40);
1487 }
1488 } else {
1489 tg3_writephy(tp, MII_BMCR,
1490 BMCR_ANENABLE | BMCR_ANRESTART);
1491 }
1492}
1493
1494static int tg3_init_5401phy_dsp(struct tg3 *tp)
1495{
1496 int err;
1497
1498 /* Turn off tap power management. */
1499 /* Set Extended packet length bit */
1500 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1501
1502 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1503 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1504
1505 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1506 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1507
1508 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1509 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1510
1511 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1512 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1513
1514 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1515 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1516
1517 udelay(40);
1518
1519 return err;
1520}
1521
1522static int tg3_copper_is_advertising_all(struct tg3 *tp)
1523{
1524 u32 adv_reg, all_mask;
1525
1526 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1527 return 0;
1528
1529 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1530 ADVERTISE_100HALF | ADVERTISE_100FULL);
1531 if ((adv_reg & all_mask) != all_mask)
1532 return 0;
1533 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1534 u32 tg3_ctrl;
1535
1536 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1537 return 0;
1538
1539 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1540 MII_TG3_CTRL_ADV_1000_FULL);
1541 if ((tg3_ctrl & all_mask) != all_mask)
1542 return 0;
1543 }
1544 return 1;
1545}
1546
1547static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1548{
1549 int current_link_up;
1550 u32 bmsr, dummy;
1551 u16 current_speed;
1552 u8 current_duplex;
1553 int i, err;
1554
1555 tw32(MAC_EVENT, 0);
1556
1557 tw32_f(MAC_STATUS,
1558 (MAC_STATUS_SYNC_CHANGED |
1559 MAC_STATUS_CFG_CHANGED |
1560 MAC_STATUS_MI_COMPLETION |
1561 MAC_STATUS_LNKSTATE_CHANGED));
1562 udelay(40);
1563
1564 tp->mi_mode = MAC_MI_MODE_BASE;
1565 tw32_f(MAC_MI_MODE, tp->mi_mode);
1566 udelay(80);
1567
1568 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1569
1570 /* Some third-party PHYs need to be reset on link going
1571 * down.
1572 */
1573 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1576 netif_carrier_ok(tp->dev)) {
1577 tg3_readphy(tp, MII_BMSR, &bmsr);
1578 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1579 !(bmsr & BMSR_LSTATUS))
1580 force_reset = 1;
1581 }
1582 if (force_reset)
1583 tg3_phy_reset(tp);
1584
1585 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1586 tg3_readphy(tp, MII_BMSR, &bmsr);
1587 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1588 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1589 bmsr = 0;
1590
1591 if (!(bmsr & BMSR_LSTATUS)) {
1592 err = tg3_init_5401phy_dsp(tp);
1593 if (err)
1594 return err;
1595
1596 tg3_readphy(tp, MII_BMSR, &bmsr);
1597 for (i = 0; i < 1000; i++) {
1598 udelay(10);
1599 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1600 (bmsr & BMSR_LSTATUS)) {
1601 udelay(40);
1602 break;
1603 }
1604 }
1605
1606 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1607 !(bmsr & BMSR_LSTATUS) &&
1608 tp->link_config.active_speed == SPEED_1000) {
1609 err = tg3_phy_reset(tp);
1610 if (!err)
1611 err = tg3_init_5401phy_dsp(tp);
1612 if (err)
1613 return err;
1614 }
1615 }
1616 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1617 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1618 /* 5701 {A0,B0} CRC bug workaround */
1619 tg3_writephy(tp, 0x15, 0x0a75);
1620 tg3_writephy(tp, 0x1c, 0x8c68);
1621 tg3_writephy(tp, 0x1c, 0x8d68);
1622 tg3_writephy(tp, 0x1c, 0x8c68);
1623 }
1624
1625 /* Clear pending interrupts... */
1626 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1627 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1628
1629 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1630 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1631 else
1632 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1633
1634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1636 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1637 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1638 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1639 else
1640 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1641 }
1642
1643 current_link_up = 0;
1644 current_speed = SPEED_INVALID;
1645 current_duplex = DUPLEX_INVALID;
1646
1647 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1648 u32 val;
1649
1650 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1651 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1652 if (!(val & (1 << 10))) {
1653 val |= (1 << 10);
1654 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1655 goto relink;
1656 }
1657 }
1658
1659 bmsr = 0;
1660 for (i = 0; i < 100; i++) {
1661 tg3_readphy(tp, MII_BMSR, &bmsr);
1662 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1663 (bmsr & BMSR_LSTATUS))
1664 break;
1665 udelay(40);
1666 }
1667
1668 if (bmsr & BMSR_LSTATUS) {
1669 u32 aux_stat, bmcr;
1670
1671 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1672 for (i = 0; i < 2000; i++) {
1673 udelay(10);
1674 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1675 aux_stat)
1676 break;
1677 }
1678
1679 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1680 &current_speed,
1681 &current_duplex);
1682
1683 bmcr = 0;
1684 for (i = 0; i < 200; i++) {
1685 tg3_readphy(tp, MII_BMCR, &bmcr);
1686 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1687 continue;
1688 if (bmcr && bmcr != 0x7fff)
1689 break;
1690 udelay(10);
1691 }
1692
1693 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1694 if (bmcr & BMCR_ANENABLE) {
1695 current_link_up = 1;
1696
1697 /* Force autoneg restart if we are exiting
1698 * low power mode.
1699 */
1700 if (!tg3_copper_is_advertising_all(tp))
1701 current_link_up = 0;
1702 } else {
1703 current_link_up = 0;
1704 }
1705 } else {
1706 if (!(bmcr & BMCR_ANENABLE) &&
1707 tp->link_config.speed == current_speed &&
1708 tp->link_config.duplex == current_duplex) {
1709 current_link_up = 1;
1710 } else {
1711 current_link_up = 0;
1712 }
1713 }
1714
1715 tp->link_config.active_speed = current_speed;
1716 tp->link_config.active_duplex = current_duplex;
1717 }
1718
1719 if (current_link_up == 1 &&
1720 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1721 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1722 u32 local_adv, remote_adv;
1723
1724 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1725 local_adv = 0;
1726 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1727
1728 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1729 remote_adv = 0;
1730
1731 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1732
1733 /* If we are not advertising full pause capability,
1734 * something is wrong. Bring the link down and reconfigure.
1735 */
1736 if (local_adv != ADVERTISE_PAUSE_CAP) {
1737 current_link_up = 0;
1738 } else {
1739 tg3_setup_flow_control(tp, local_adv, remote_adv);
1740 }
1741 }
1742relink:
1743 if (current_link_up == 0) {
1744 u32 tmp;
1745
1746 tg3_phy_copper_begin(tp);
1747
1748 tg3_readphy(tp, MII_BMSR, &tmp);
1749 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1750 (tmp & BMSR_LSTATUS))
1751 current_link_up = 1;
1752 }
1753
1754 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1755 if (current_link_up == 1) {
1756 if (tp->link_config.active_speed == SPEED_100 ||
1757 tp->link_config.active_speed == SPEED_10)
1758 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1759 else
1760 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1761 } else
1762 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1763
1764 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1765 if (tp->link_config.active_duplex == DUPLEX_HALF)
1766 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1767
1768 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1770 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1771 (current_link_up == 1 &&
1772 tp->link_config.active_speed == SPEED_10))
1773 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1774 } else {
1775 if (current_link_up == 1)
1776 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1777 }
1778
1779 /* ??? Without this setting Netgear GA302T PHY does not
1780 * ??? send/receive packets...
1781 */
1782 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1783 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1784 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1785 tw32_f(MAC_MI_MODE, tp->mi_mode);
1786 udelay(80);
1787 }
1788
1789 tw32_f(MAC_MODE, tp->mac_mode);
1790 udelay(40);
1791
1792 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1793 /* Polled via timer. */
1794 tw32_f(MAC_EVENT, 0);
1795 } else {
1796 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1797 }
1798 udelay(40);
1799
1800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1801 current_link_up == 1 &&
1802 tp->link_config.active_speed == SPEED_1000 &&
1803 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1804 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1805 udelay(120);
1806 tw32_f(MAC_STATUS,
1807 (MAC_STATUS_SYNC_CHANGED |
1808 MAC_STATUS_CFG_CHANGED));
1809 udelay(40);
1810 tg3_write_mem(tp,
1811 NIC_SRAM_FIRMWARE_MBOX,
1812 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1813 }
1814
1815 if (current_link_up != netif_carrier_ok(tp->dev)) {
1816 if (current_link_up)
1817 netif_carrier_on(tp->dev);
1818 else
1819 netif_carrier_off(tp->dev);
1820 tg3_link_report(tp);
1821 }
1822
1823 return 0;
1824}
1825
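/* Software state machine for 1000BASE-X (fiber) autonegotiation; the
 * states below mirror the IEEE 802.3 clause 37 arbitration diagram.
 * Used by fiber_autoneg() when the chip is not doing autoneg in
 * hardware (see tg3_setup_fiber_by_hand).
 */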
1826struct tg3_fiber_aneginfo {
1827 int state;
1828#define ANEG_STATE_UNKNOWN 0
1829#define ANEG_STATE_AN_ENABLE 1
1830#define ANEG_STATE_RESTART_INIT 2
1831#define ANEG_STATE_RESTART 3
1832#define ANEG_STATE_DISABLE_LINK_OK 4
1833#define ANEG_STATE_ABILITY_DETECT_INIT 5
1834#define ANEG_STATE_ABILITY_DETECT 6
1835#define ANEG_STATE_ACK_DETECT_INIT 7
1836#define ANEG_STATE_ACK_DETECT 8
1837#define ANEG_STATE_COMPLETE_ACK_INIT 9
1838#define ANEG_STATE_COMPLETE_ACK 10
1839#define ANEG_STATE_IDLE_DETECT_INIT 11
1840#define ANEG_STATE_IDLE_DETECT 12
1841#define ANEG_STATE_LINK_OK 13
1842#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1843#define ANEG_STATE_NEXT_PAGE_WAIT 15
1844
1845 u32 flags;
1846#define MR_AN_ENABLE 0x00000001
1847#define MR_RESTART_AN 0x00000002
1848#define MR_AN_COMPLETE 0x00000004
1849#define MR_PAGE_RX 0x00000008
1850#define MR_NP_LOADED 0x00000010
1851#define MR_TOGGLE_TX 0x00000020
1852#define MR_LP_ADV_FULL_DUPLEX 0x00000040
1853#define MR_LP_ADV_HALF_DUPLEX 0x00000080
1854#define MR_LP_ADV_SYM_PAUSE 0x00000100
1855#define MR_LP_ADV_ASYM_PAUSE 0x00000200
1856#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1857#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1858#define MR_LP_ADV_NEXT_PAGE 0x00001000
1859#define MR_TOGGLE_RX 0x00002000
1860#define MR_NP_RX 0x00004000
1861
1862#define MR_LINK_OK 0x80000000
1863
1864 unsigned long link_time, cur_time;
1865
1866 u32 ability_match_cfg;
1867 int ability_match_count;
1868
1869 char ability_match, idle_match, ack_match;
1870
1871 u32 txconfig, rxconfig;
1872#define ANEG_CFG_NP 0x00000080
1873#define ANEG_CFG_ACK 0x00000040
1874#define ANEG_CFG_RF2 0x00000020
1875#define ANEG_CFG_RF1 0x00000010
1876#define ANEG_CFG_PS2 0x00000001
1877#define ANEG_CFG_PS1 0x00008000
1878#define ANEG_CFG_HD 0x00004000
1879#define ANEG_CFG_FD 0x00002000
1880#define ANEG_CFG_INVAL 0x00001f06
1881
1882};
1883#define ANEG_OK 0
1884#define ANEG_DONE 1
1885#define ANEG_TIMER_ENAB 2
1886#define ANEG_FAILED -1
1887
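/* Settle time is measured in state machine ticks; fiber_autoneg() below
 * runs roughly one tick per microsecond, so this is about 10ms.
 */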
1888#define ANEG_STATE_SETTLE_TIME 10000
1889
1890static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1891 struct tg3_fiber_aneginfo *ap)
1892{
1893 unsigned long delta;
1894 u32 rx_cfg_reg;
1895 int ret;
1896
1897 if (ap->state == ANEG_STATE_UNKNOWN) {
1898 ap->rxconfig = 0;
1899 ap->link_time = 0;
1900 ap->cur_time = 0;
1901 ap->ability_match_cfg = 0;
1902 ap->ability_match_count = 0;
1903 ap->ability_match = 0;
1904 ap->idle_match = 0;
1905 ap->ack_match = 0;
1906 }
1907 ap->cur_time++;
1908
1909 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1910 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1911
1912 if (rx_cfg_reg != ap->ability_match_cfg) {
1913 ap->ability_match_cfg = rx_cfg_reg;
1914 ap->ability_match = 0;
1915 ap->ability_match_count = 0;
1916 } else {
1917 if (++ap->ability_match_count > 1) {
1918 ap->ability_match = 1;
1919 ap->ability_match_cfg = rx_cfg_reg;
1920 }
1921 }
1922 if (rx_cfg_reg & ANEG_CFG_ACK)
1923 ap->ack_match = 1;
1924 else
1925 ap->ack_match = 0;
1926
1927 ap->idle_match = 0;
1928 } else {
1929 ap->idle_match = 1;
1930 ap->ability_match_cfg = 0;
1931 ap->ability_match_count = 0;
1932 ap->ability_match = 0;
1933 ap->ack_match = 0;
1934
1935 rx_cfg_reg = 0;
1936 }
1937
1938 ap->rxconfig = rx_cfg_reg;
1939 ret = ANEG_OK;
1940
1941 switch(ap->state) {
1942 case ANEG_STATE_UNKNOWN:
1943 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1944 ap->state = ANEG_STATE_AN_ENABLE;
1945
1946 /* fallthru */
1947 case ANEG_STATE_AN_ENABLE:
1948 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1949 if (ap->flags & MR_AN_ENABLE) {
1950 ap->link_time = 0;
1951 ap->cur_time = 0;
1952 ap->ability_match_cfg = 0;
1953 ap->ability_match_count = 0;
1954 ap->ability_match = 0;
1955 ap->idle_match = 0;
1956 ap->ack_match = 0;
1957
1958 ap->state = ANEG_STATE_RESTART_INIT;
1959 } else {
1960 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1961 }
1962 break;
1963
1964 case ANEG_STATE_RESTART_INIT:
1965 ap->link_time = ap->cur_time;
1966 ap->flags &= ~(MR_NP_LOADED);
1967 ap->txconfig = 0;
1968 tw32(MAC_TX_AUTO_NEG, 0);
1969 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1970 tw32_f(MAC_MODE, tp->mac_mode);
1971 udelay(40);
1972
1973 ret = ANEG_TIMER_ENAB;
1974 ap->state = ANEG_STATE_RESTART;
1975
1976 /* fallthru */
1977 case ANEG_STATE_RESTART:
1978 delta = ap->cur_time - ap->link_time;
1979 if (delta > ANEG_STATE_SETTLE_TIME) {
1980 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1981 } else {
1982 ret = ANEG_TIMER_ENAB;
1983 }
1984 break;
1985
1986 case ANEG_STATE_DISABLE_LINK_OK:
1987 ret = ANEG_DONE;
1988 break;
1989
1990 case ANEG_STATE_ABILITY_DETECT_INIT:
1991 ap->flags &= ~(MR_TOGGLE_TX);
1992 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1993 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1994 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1995 tw32_f(MAC_MODE, tp->mac_mode);
1996 udelay(40);
1997
1998 ap->state = ANEG_STATE_ABILITY_DETECT;
1999 break;
2000
2001 case ANEG_STATE_ABILITY_DETECT:
2002 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2003 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2004 }
2005 break;
2006
2007 case ANEG_STATE_ACK_DETECT_INIT:
2008 ap->txconfig |= ANEG_CFG_ACK;
2009 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2010 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2011 tw32_f(MAC_MODE, tp->mac_mode);
2012 udelay(40);
2013
2014 ap->state = ANEG_STATE_ACK_DETECT;
2015
2016 /* fallthru */
2017 case ANEG_STATE_ACK_DETECT:
2018 if (ap->ack_match != 0) {
2019 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2020 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2021 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2022 } else {
2023 ap->state = ANEG_STATE_AN_ENABLE;
2024 }
2025 } else if (ap->ability_match != 0 &&
2026 ap->rxconfig == 0) {
2027 ap->state = ANEG_STATE_AN_ENABLE;
2028 }
2029 break;
2030
2031 case ANEG_STATE_COMPLETE_ACK_INIT:
2032 if (ap->rxconfig & ANEG_CFG_INVAL) {
2033 ret = ANEG_FAILED;
2034 break;
2035 }
2036 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2037 MR_LP_ADV_HALF_DUPLEX |
2038 MR_LP_ADV_SYM_PAUSE |
2039 MR_LP_ADV_ASYM_PAUSE |
2040 MR_LP_ADV_REMOTE_FAULT1 |
2041 MR_LP_ADV_REMOTE_FAULT2 |
2042 MR_LP_ADV_NEXT_PAGE |
2043 MR_TOGGLE_RX |
2044 MR_NP_RX);
2045 if (ap->rxconfig & ANEG_CFG_FD)
2046 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2047 if (ap->rxconfig & ANEG_CFG_HD)
2048 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2049 if (ap->rxconfig & ANEG_CFG_PS1)
2050 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2051 if (ap->rxconfig & ANEG_CFG_PS2)
2052 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2053 if (ap->rxconfig & ANEG_CFG_RF1)
2054 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2055 if (ap->rxconfig & ANEG_CFG_RF2)
2056 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2057 if (ap->rxconfig & ANEG_CFG_NP)
2058 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2059
2060 ap->link_time = ap->cur_time;
2061
2062 ap->flags ^= (MR_TOGGLE_TX);
2063 if (ap->rxconfig & 0x0008)
2064 ap->flags |= MR_TOGGLE_RX;
2065 if (ap->rxconfig & ANEG_CFG_NP)
2066 ap->flags |= MR_NP_RX;
2067 ap->flags |= MR_PAGE_RX;
2068
2069 ap->state = ANEG_STATE_COMPLETE_ACK;
2070 ret = ANEG_TIMER_ENAB;
2071 break;
2072
2073 case ANEG_STATE_COMPLETE_ACK:
2074 if (ap->ability_match != 0 &&
2075 ap->rxconfig == 0) {
2076 ap->state = ANEG_STATE_AN_ENABLE;
2077 break;
2078 }
2079 delta = ap->cur_time - ap->link_time;
2080 if (delta > ANEG_STATE_SETTLE_TIME) {
2081 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2082 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2083 } else {
2084 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2085 !(ap->flags & MR_NP_RX)) {
2086 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2087 } else {
2088 ret = ANEG_FAILED;
2089 }
2090 }
2091 }
2092 break;
2093
2094 case ANEG_STATE_IDLE_DETECT_INIT:
2095 ap->link_time = ap->cur_time;
2096 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2097 tw32_f(MAC_MODE, tp->mac_mode);
2098 udelay(40);
2099
2100 ap->state = ANEG_STATE_IDLE_DETECT;
2101 ret = ANEG_TIMER_ENAB;
2102 break;
2103
2104 case ANEG_STATE_IDLE_DETECT:
2105 if (ap->ability_match != 0 &&
2106 ap->rxconfig == 0) {
2107 ap->state = ANEG_STATE_AN_ENABLE;
2108 break;
2109 }
2110 delta = ap->cur_time - ap->link_time;
2111 if (delta > ANEG_STATE_SETTLE_TIME) {
2112 /* XXX another gem from the Broadcom driver :( */
2113 ap->state = ANEG_STATE_LINK_OK;
2114 }
2115 break;
2116
2117 case ANEG_STATE_LINK_OK:
2118 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2119 ret = ANEG_DONE;
2120 break;
2121
2122 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2123 /* ??? unimplemented */
2124 break;
2125
2126 case ANEG_STATE_NEXT_PAGE_WAIT:
2127 /* ??? unimplemented */
2128 break;
2129
2130 default:
2131 ret = ANEG_FAILED;
2132 break;
2133	}
2134
2135 return ret;
2136}
2137
2138static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2139{
2140 int res = 0;
2141 struct tg3_fiber_aneginfo aninfo;
2142 int status = ANEG_FAILED;
2143 unsigned int tick;
2144 u32 tmp;
2145
2146 tw32_f(MAC_TX_AUTO_NEG, 0);
2147
2148 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2149 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2150 udelay(40);
2151
2152 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2153 udelay(40);
2154
2155 memset(&aninfo, 0, sizeof(aninfo));
2156 aninfo.flags |= MR_AN_ENABLE;
2157 aninfo.state = ANEG_STATE_UNKNOWN;
2158 aninfo.cur_time = 0;
2159 tick = 0;
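	/* Run the autoneg state machine for at most ~195ms (one tick
	 * per microsecond) or until it completes or fails.
	 */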
2160 while (++tick < 195000) {
2161 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2162 if (status == ANEG_DONE || status == ANEG_FAILED)
2163 break;
2164
2165 udelay(1);
2166 }
2167
2168 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2169 tw32_f(MAC_MODE, tp->mac_mode);
2170 udelay(40);
2171
2172 *flags = aninfo.flags;
2173
2174 if (status == ANEG_DONE &&
2175 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2176 MR_LP_ADV_FULL_DUPLEX)))
2177 res = 1;
2178
2179 return res;
2180}
2181
2182static void tg3_init_bcm8002(struct tg3 *tp)
2183{
2184 u32 mac_status = tr32(MAC_STATUS);
2185 int i;
2186
2187	/* Reset when initializing for the first time or when we have a link. */
2188 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2189 !(mac_status & MAC_STATUS_PCS_SYNCED))
2190 return;
2191
2192 /* Set PLL lock range. */
2193 tg3_writephy(tp, 0x16, 0x8007);
2194
2195 /* SW reset */
2196 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2197
2198 /* Wait for reset to complete. */
2199 /* XXX schedule_timeout() ... */
2200 for (i = 0; i < 500; i++)
2201 udelay(10);
2202
2203 /* Config mode; select PMA/Ch 1 regs. */
2204 tg3_writephy(tp, 0x10, 0x8411);
2205
2206 /* Enable auto-lock and comdet, select txclk for tx. */
2207 tg3_writephy(tp, 0x11, 0x0a10);
2208
2209 tg3_writephy(tp, 0x18, 0x00a0);
2210 tg3_writephy(tp, 0x16, 0x41ff);
2211
2212 /* Assert and deassert POR. */
2213 tg3_writephy(tp, 0x13, 0x0400);
2214 udelay(40);
2215 tg3_writephy(tp, 0x13, 0x0000);
2216
2217 tg3_writephy(tp, 0x11, 0x0a50);
2218 udelay(40);
2219 tg3_writephy(tp, 0x11, 0x0a10);
2220
2221 /* Wait for signal to stabilize */
2222 /* XXX schedule_timeout() ... */
2223 for (i = 0; i < 15000; i++)
2224 udelay(10);
2225
2226 /* Deselect the channel register so we can read the PHYID
2227 * later.
2228 */
2229 tg3_writephy(tp, 0x10, 0x8011);
2230}
2231
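/* Drive the on-chip SG_DIG SerDes autoneg logic.  Returns 1 if the
 * link came up, 0 otherwise.
 */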
2232static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2233{
2234 u32 sg_dig_ctrl, sg_dig_status;
2235 u32 serdes_cfg, expected_sg_dig_ctrl;
2236 int workaround, port_a;
2237 int current_link_up;
2238
2239 serdes_cfg = 0;
2240 expected_sg_dig_ctrl = 0;
2241 workaround = 0;
2242 port_a = 1;
2243 current_link_up = 0;
2244
2245 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2246 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2247 workaround = 1;
2248 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2249 port_a = 0;
2250
2251 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2252 /* preserve bits 20-23 for voltage regulator */
2253 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2254 }
2255
2256 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2257
2258 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2259 if (sg_dig_ctrl & (1 << 31)) {
2260 if (workaround) {
2261 u32 val = serdes_cfg;
2262
2263 if (port_a)
2264 val |= 0xc010000;
2265 else
2266 val |= 0x4010000;
2267 tw32_f(MAC_SERDES_CFG, val);
2268 }
2269 tw32_f(SG_DIG_CTRL, 0x01388400);
2270 }
2271 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2272 tg3_setup_flow_control(tp, 0, 0);
2273 current_link_up = 1;
2274 }
2275 goto out;
2276 }
2277
2278 /* Want auto-negotiation. */
2279 expected_sg_dig_ctrl = 0x81388400;
2280
2281 /* Pause capability */
2282 expected_sg_dig_ctrl |= (1 << 11);
2283
2284	/* Asymmetric pause */
2285 expected_sg_dig_ctrl |= (1 << 12);
2286
2287 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2288 if (workaround)
2289 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2290 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2291 udelay(5);
2292 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2293
2294 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2295 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2296 MAC_STATUS_SIGNAL_DET)) {
2297 int i;
2298
2299		/* Give it time to negotiate (~200ms) */
2300 for (i = 0; i < 40000; i++) {
2301 sg_dig_status = tr32(SG_DIG_STATUS);
2302 if (sg_dig_status & (0x3))
2303 break;
2304 udelay(5);
2305 }
2306 mac_status = tr32(MAC_STATUS);
2307
2308 if ((sg_dig_status & (1 << 1)) &&
2309 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2310 u32 local_adv, remote_adv;
2311
2312 local_adv = ADVERTISE_PAUSE_CAP;
2313 remote_adv = 0;
2314 if (sg_dig_status & (1 << 19))
2315 remote_adv |= LPA_PAUSE_CAP;
2316 if (sg_dig_status & (1 << 20))
2317 remote_adv |= LPA_PAUSE_ASYM;
2318
2319 tg3_setup_flow_control(tp, local_adv, remote_adv);
2320 current_link_up = 1;
2321 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2322 } else if (!(sg_dig_status & (1 << 1))) {
2323 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2324 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2325 else {
2326 if (workaround) {
2327 u32 val = serdes_cfg;
2328
2329 if (port_a)
2330 val |= 0xc010000;
2331 else
2332 val |= 0x4010000;
2333
2334 tw32_f(MAC_SERDES_CFG, val);
2335 }
2336
2337 tw32_f(SG_DIG_CTRL, 0x01388400);
2338 udelay(40);
2339
2340				/* Link parallel detection: the link is up only
2341				 * if we have PCS_SYNC and are not receiving
2342				 * config code words. */
2343 mac_status = tr32(MAC_STATUS);
2344 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2345 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2346 tg3_setup_flow_control(tp, 0, 0);
2347 current_link_up = 1;
2348 }
2349 }
2350 }
2351 }
2352
2353out:
2354 return current_link_up;
2355}
2356
2357static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2358{
2359 int current_link_up = 0;
2360
2361 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2362 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2363 goto out;
2364 }
2365
2366 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2367 u32 flags;
2368 int i;
2369
2370 if (fiber_autoneg(tp, &flags)) {
2371 u32 local_adv, remote_adv;
2372
2373 local_adv = ADVERTISE_PAUSE_CAP;
2374 remote_adv = 0;
2375 if (flags & MR_LP_ADV_SYM_PAUSE)
2376 remote_adv |= LPA_PAUSE_CAP;
2377 if (flags & MR_LP_ADV_ASYM_PAUSE)
2378 remote_adv |= LPA_PAUSE_ASYM;
2379
2380 tg3_setup_flow_control(tp, local_adv, remote_adv);
2381
2382 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2383 current_link_up = 1;
2384 }
2385 for (i = 0; i < 30; i++) {
2386 udelay(20);
2387 tw32_f(MAC_STATUS,
2388 (MAC_STATUS_SYNC_CHANGED |
2389 MAC_STATUS_CFG_CHANGED));
2390 udelay(40);
2391 if ((tr32(MAC_STATUS) &
2392 (MAC_STATUS_SYNC_CHANGED |
2393 MAC_STATUS_CFG_CHANGED)) == 0)
2394 break;
2395 }
2396
2397 mac_status = tr32(MAC_STATUS);
2398 if (current_link_up == 0 &&
2399 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2400 !(mac_status & MAC_STATUS_RCVD_CFG))
2401 current_link_up = 1;
2402 } else {
2403 /* Forcing 1000FD link up. */
2404 current_link_up = 1;
2405 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2406
2407 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2408 udelay(40);
2409 }
2410
2411out:
2412 return current_link_up;
2413}
2414
2415static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2416{
2417 u32 orig_pause_cfg;
2418 u16 orig_active_speed;
2419 u8 orig_active_duplex;
2420 u32 mac_status;
2421 int current_link_up;
2422 int i;
2423
2424 orig_pause_cfg =
2425 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2426 TG3_FLAG_TX_PAUSE));
2427 orig_active_speed = tp->link_config.active_speed;
2428 orig_active_duplex = tp->link_config.active_duplex;
2429
2430 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2431 netif_carrier_ok(tp->dev) &&
2432 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2433 mac_status = tr32(MAC_STATUS);
2434 mac_status &= (MAC_STATUS_PCS_SYNCED |
2435 MAC_STATUS_SIGNAL_DET |
2436 MAC_STATUS_CFG_CHANGED |
2437 MAC_STATUS_RCVD_CFG);
2438 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2439 MAC_STATUS_SIGNAL_DET)) {
2440 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2441 MAC_STATUS_CFG_CHANGED));
2442 return 0;
2443 }
2444 }
2445
2446 tw32_f(MAC_TX_AUTO_NEG, 0);
2447
2448 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2449 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2450 tw32_f(MAC_MODE, tp->mac_mode);
2451 udelay(40);
2452
2453 if (tp->phy_id == PHY_ID_BCM8002)
2454 tg3_init_bcm8002(tp);
2455
2456 /* Enable link change event even when serdes polling. */
2457 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2458 udelay(40);
2459
2460 current_link_up = 0;
2461 mac_status = tr32(MAC_STATUS);
2462
2463 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2464 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2465 else
2466 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2467
2468 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2469 tw32_f(MAC_MODE, tp->mac_mode);
2470 udelay(40);
2471
2472 tp->hw_status->status =
2473 (SD_STATUS_UPDATED |
2474 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2475
2476 for (i = 0; i < 100; i++) {
2477 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2478 MAC_STATUS_CFG_CHANGED));
2479 udelay(5);
2480 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2481 MAC_STATUS_CFG_CHANGED)) == 0)
2482 break;
2483 }
2484
2485 mac_status = tr32(MAC_STATUS);
2486 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2487 current_link_up = 0;
2488 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2489 tw32_f(MAC_MODE, (tp->mac_mode |
2490 MAC_MODE_SEND_CONFIGS));
2491 udelay(1);
2492 tw32_f(MAC_MODE, tp->mac_mode);
2493 }
2494 }
2495
2496 if (current_link_up == 1) {
2497 tp->link_config.active_speed = SPEED_1000;
2498 tp->link_config.active_duplex = DUPLEX_FULL;
2499 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2500 LED_CTRL_LNKLED_OVERRIDE |
2501 LED_CTRL_1000MBPS_ON));
2502 } else {
2503 tp->link_config.active_speed = SPEED_INVALID;
2504 tp->link_config.active_duplex = DUPLEX_INVALID;
2505 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2506 LED_CTRL_LNKLED_OVERRIDE |
2507 LED_CTRL_TRAFFIC_OVERRIDE));
2508 }
2509
2510 if (current_link_up != netif_carrier_ok(tp->dev)) {
2511 if (current_link_up)
2512 netif_carrier_on(tp->dev);
2513 else
2514 netif_carrier_off(tp->dev);
2515 tg3_link_report(tp);
2516 } else {
2517 u32 now_pause_cfg =
2518 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2519 TG3_FLAG_TX_PAUSE);
2520 if (orig_pause_cfg != now_pause_cfg ||
2521 orig_active_speed != tp->link_config.active_speed ||
2522 orig_active_duplex != tp->link_config.active_duplex)
2523 tg3_link_report(tp);
2524 }
2525
2526 return 0;
2527}
2528
Michael Chan747e8f82005-07-25 12:33:22 -07002529static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2530{
2531 int current_link_up, err = 0;
2532 u32 bmsr, bmcr;
2533 u16 current_speed;
2534 u8 current_duplex;
2535
2536 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2537 tw32_f(MAC_MODE, tp->mac_mode);
2538 udelay(40);
2539
2540 tw32(MAC_EVENT, 0);
2541
2542 tw32_f(MAC_STATUS,
2543 (MAC_STATUS_SYNC_CHANGED |
2544 MAC_STATUS_CFG_CHANGED |
2545 MAC_STATUS_MI_COMPLETION |
2546 MAC_STATUS_LNKSTATE_CHANGED));
2547 udelay(40);
2548
2549 if (force_reset)
2550 tg3_phy_reset(tp);
2551
2552 current_link_up = 0;
2553 current_speed = SPEED_INVALID;
2554 current_duplex = DUPLEX_INVALID;
2555
2556 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2557 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2558
2559 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2560
2561 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2562 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2563 /* do nothing, just check for link up at the end */
2564 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2565 u32 adv, new_adv;
2566
2567 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2568 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2569 ADVERTISE_1000XPAUSE |
2570 ADVERTISE_1000XPSE_ASYM |
2571 ADVERTISE_SLCT);
2572
2573 /* Always advertise symmetric PAUSE just like copper */
2574 new_adv |= ADVERTISE_1000XPAUSE;
2575
2576 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2577 new_adv |= ADVERTISE_1000XHALF;
2578 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2579 new_adv |= ADVERTISE_1000XFULL;
2580
2581 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2582 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2583 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2584 tg3_writephy(tp, MII_BMCR, bmcr);
2585
2586 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2587 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2588 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2589
2590 return err;
2591 }
2592 } else {
2593 u32 new_bmcr;
2594
2595 bmcr &= ~BMCR_SPEED1000;
2596 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2597
2598 if (tp->link_config.duplex == DUPLEX_FULL)
2599 new_bmcr |= BMCR_FULLDPLX;
2600
2601 if (new_bmcr != bmcr) {
2602 /* BMCR_SPEED1000 is a reserved bit that needs
2603 * to be set on write.
2604 */
2605 new_bmcr |= BMCR_SPEED1000;
2606
2607 /* Force a linkdown */
2608 if (netif_carrier_ok(tp->dev)) {
2609 u32 adv;
2610
2611 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2612 adv &= ~(ADVERTISE_1000XFULL |
2613 ADVERTISE_1000XHALF |
2614 ADVERTISE_SLCT);
2615 tg3_writephy(tp, MII_ADVERTISE, adv);
2616 tg3_writephy(tp, MII_BMCR, bmcr |
2617 BMCR_ANRESTART |
2618 BMCR_ANENABLE);
2619 udelay(10);
2620 netif_carrier_off(tp->dev);
2621 }
2622 tg3_writephy(tp, MII_BMCR, new_bmcr);
2623 bmcr = new_bmcr;
2624 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2625 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2626 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2627 }
2628 }
2629
2630 if (bmsr & BMSR_LSTATUS) {
2631 current_speed = SPEED_1000;
2632 current_link_up = 1;
2633 if (bmcr & BMCR_FULLDPLX)
2634 current_duplex = DUPLEX_FULL;
2635 else
2636 current_duplex = DUPLEX_HALF;
2637
2638 if (bmcr & BMCR_ANENABLE) {
2639 u32 local_adv, remote_adv, common;
2640
2641 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2642 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2643 common = local_adv & remote_adv;
2644 if (common & (ADVERTISE_1000XHALF |
2645 ADVERTISE_1000XFULL)) {
2646 if (common & ADVERTISE_1000XFULL)
2647 current_duplex = DUPLEX_FULL;
2648 else
2649 current_duplex = DUPLEX_HALF;
2650
2651 tg3_setup_flow_control(tp, local_adv,
2652 remote_adv);
2653 }
2654 else
2655 current_link_up = 0;
2656 }
2657 }
2658
2659 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2660 if (tp->link_config.active_duplex == DUPLEX_HALF)
2661 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2662
2663 tw32_f(MAC_MODE, tp->mac_mode);
2664 udelay(40);
2665
2666 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2667
2668 tp->link_config.active_speed = current_speed;
2669 tp->link_config.active_duplex = current_duplex;
2670
2671 if (current_link_up != netif_carrier_ok(tp->dev)) {
2672 if (current_link_up)
2673 netif_carrier_on(tp->dev);
2674 else {
2675 netif_carrier_off(tp->dev);
2676 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2677 }
2678 tg3_link_report(tp);
2679 }
2680 return err;
2681}
2682
2683static void tg3_serdes_parallel_detect(struct tg3 *tp)
2684{
2685 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2686 /* Give autoneg time to complete. */
2687 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2688 return;
2689 }
2690 if (!netif_carrier_ok(tp->dev) &&
2691 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2692 u32 bmcr;
2693
2694 tg3_readphy(tp, MII_BMCR, &bmcr);
2695 if (bmcr & BMCR_ANENABLE) {
2696 u32 phy1, phy2;
2697
2698 /* Select shadow register 0x1f */
2699 tg3_writephy(tp, 0x1c, 0x7c00);
2700 tg3_readphy(tp, 0x1c, &phy1);
2701
2702 /* Select expansion interrupt status register */
2703 tg3_writephy(tp, 0x17, 0x0f01);
2704 tg3_readphy(tp, 0x15, &phy2);
2705 tg3_readphy(tp, 0x15, &phy2);
2706
2707 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2708 /* We have signal detect and not receiving
2709 * config code words, link is up by parallel
2710 * detection.
2711 */
2712
2713 bmcr &= ~BMCR_ANENABLE;
2714 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2715 tg3_writephy(tp, MII_BMCR, bmcr);
2716 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2717 }
2718 }
2719 }
2720 else if (netif_carrier_ok(tp->dev) &&
2721 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2722 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2723 u32 phy2;
2724
2725 /* Select expansion interrupt status register */
2726 tg3_writephy(tp, 0x17, 0x0f01);
2727 tg3_readphy(tp, 0x15, &phy2);
2728 if (phy2 & 0x20) {
2729 u32 bmcr;
2730
2731 /* Config code words received, turn on autoneg. */
2732 tg3_readphy(tp, MII_BMCR, &bmcr);
2733 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2734
2735 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2736
2737 }
2738 }
2739}
2740
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2742{
2743 int err;
2744
2745 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2746 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07002747 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2748 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 } else {
2750 err = tg3_setup_copper_phy(tp, force_reset);
2751 }
2752
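	/* 1000Mbps half duplex needs the extended slot time (802.3z
	 * carrier extension); all other modes use the standard value.
	 */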
2753 if (tp->link_config.active_speed == SPEED_1000 &&
2754 tp->link_config.active_duplex == DUPLEX_HALF)
2755 tw32(MAC_TX_LENGTHS,
2756 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2757 (6 << TX_LENGTHS_IPG_SHIFT) |
2758 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2759 else
2760 tw32(MAC_TX_LENGTHS,
2761 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2762 (6 << TX_LENGTHS_IPG_SHIFT) |
2763 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2764
2765 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2766 if (netif_carrier_ok(tp->dev)) {
2767 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07002768 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 } else {
2770 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2771 }
2772 }
2773
2774 return err;
2775}
2776
2777/* Tigon3 never reports partial packet sends. So we do not
2778 * need special logic to handle SKBs that have not had all
2779 * of their frags sent yet, like SunGEM does.
2780 */
2781static void tg3_tx(struct tg3 *tp)
2782{
2783 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2784 u32 sw_idx = tp->tx_cons;
2785
2786 while (sw_idx != hw_idx) {
2787 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2788 struct sk_buff *skb = ri->skb;
2789 int i;
2790
2791 if (unlikely(skb == NULL))
2792 BUG();
2793
2794 pci_unmap_single(tp->pdev,
2795 pci_unmap_addr(ri, mapping),
2796 skb_headlen(skb),
2797 PCI_DMA_TODEVICE);
2798
2799 ri->skb = NULL;
2800
2801 sw_idx = NEXT_TX(sw_idx);
2802
2803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2804 if (unlikely(sw_idx == hw_idx))
2805 BUG();
2806
2807 ri = &tp->tx_buffers[sw_idx];
2808 if (unlikely(ri->skb != NULL))
2809 BUG();
2810
2811 pci_unmap_page(tp->pdev,
2812 pci_unmap_addr(ri, mapping),
2813 skb_shinfo(skb)->frags[i].size,
2814 PCI_DMA_TODEVICE);
2815
2816 sw_idx = NEXT_TX(sw_idx);
2817 }
2818
David S. Millerf47c11e2005-06-24 20:18:35 -07002819 dev_kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 }
2821
2822 tp->tx_cons = sw_idx;
2823
2824 if (netif_queue_stopped(tp->dev) &&
2825 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2826 netif_wake_queue(tp->dev);
2827}
2828
2829/* Returns size of skb allocated or < 0 on error.
2830 *
2831 * We only need to fill in the address because the other members
2832 * of the RX descriptor are invariant, see tg3_init_rings.
2833 *
2834 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2835 * posting buffers we only dirty the first cache line of the RX
2836 * descriptor (containing the address). Whereas for the RX status
2837 * buffers the cpu only reads the last cacheline of the RX descriptor
2838 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2839 */
2840static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2841 int src_idx, u32 dest_idx_unmasked)
2842{
2843 struct tg3_rx_buffer_desc *desc;
2844 struct ring_info *map, *src_map;
2845 struct sk_buff *skb;
2846 dma_addr_t mapping;
2847 int skb_size, dest_idx;
2848
2849 src_map = NULL;
2850 switch (opaque_key) {
2851 case RXD_OPAQUE_RING_STD:
2852 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2853 desc = &tp->rx_std[dest_idx];
2854 map = &tp->rx_std_buffers[dest_idx];
2855 if (src_idx >= 0)
2856 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07002857 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 break;
2859
2860 case RXD_OPAQUE_RING_JUMBO:
2861 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2862 desc = &tp->rx_jumbo[dest_idx];
2863 map = &tp->rx_jumbo_buffers[dest_idx];
2864 if (src_idx >= 0)
2865 src_map = &tp->rx_jumbo_buffers[src_idx];
2866 skb_size = RX_JUMBO_PKT_BUF_SZ;
2867 break;
2868
2869 default:
2870 return -EINVAL;
2871	}
2872
2873 /* Do not overwrite any of the map or rp information
2874 * until we are sure we can commit to a new buffer.
2875 *
2876 * Callers depend upon this behavior and assume that
2877 * we leave everything unchanged if we fail.
2878 */
2879 skb = dev_alloc_skb(skb_size);
2880 if (skb == NULL)
2881 return -ENOMEM;
2882
2883 skb->dev = tp->dev;
2884 skb_reserve(skb, tp->rx_offset);
2885
2886 mapping = pci_map_single(tp->pdev, skb->data,
2887 skb_size - tp->rx_offset,
2888 PCI_DMA_FROMDEVICE);
2889
2890 map->skb = skb;
2891 pci_unmap_addr_set(map, mapping, mapping);
2892
2893 if (src_map != NULL)
2894 src_map->skb = NULL;
2895
2896 desc->addr_hi = ((u64)mapping >> 32);
2897 desc->addr_lo = ((u64)mapping & 0xffffffff);
2898
2899 return skb_size;
2900}
2901
2902/* We only need to move over in the address because the other
2903 * members of the RX descriptor are invariant. See notes above
2904 * tg3_alloc_rx_skb for full details.
2905 */
2906static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2907 int src_idx, u32 dest_idx_unmasked)
2908{
2909 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2910 struct ring_info *src_map, *dest_map;
2911 int dest_idx;
2912
2913 switch (opaque_key) {
2914 case RXD_OPAQUE_RING_STD:
2915 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2916 dest_desc = &tp->rx_std[dest_idx];
2917 dest_map = &tp->rx_std_buffers[dest_idx];
2918 src_desc = &tp->rx_std[src_idx];
2919 src_map = &tp->rx_std_buffers[src_idx];
2920 break;
2921
2922 case RXD_OPAQUE_RING_JUMBO:
2923 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2924 dest_desc = &tp->rx_jumbo[dest_idx];
2925 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2926 src_desc = &tp->rx_jumbo[src_idx];
2927 src_map = &tp->rx_jumbo_buffers[src_idx];
2928 break;
2929
2930 default:
2931 return;
2932	}
2933
2934 dest_map->skb = src_map->skb;
2935 pci_unmap_addr_set(dest_map, mapping,
2936 pci_unmap_addr(src_map, mapping));
2937 dest_desc->addr_hi = src_desc->addr_hi;
2938 dest_desc->addr_lo = src_desc->addr_lo;
2939
2940 src_map->skb = NULL;
2941}
2942
2943#if TG3_VLAN_TAG_USED
2944static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2945{
2946 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2947}
2948#endif
2949
2950/* The RX ring scheme is composed of multiple rings which post fresh
2951 * buffers to the chip, and one special ring the chip uses to report
2952 * status back to the host.
2953 *
2954 * The special ring reports the status of received packets to the
2955 * host. The chip does not write into the original descriptor the
2956 * RX buffer was obtained from. The chip simply takes the original
2957 * descriptor as provided by the host, updates the status and length
2958 * field, then writes this into the next status ring entry.
2959 *
2960 * Each ring the host uses to post buffers to the chip is described
2961 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2962 * it is first placed into the on-chip ram. When the packet's length
2963 * is known, it walks down the TG3_BDINFO entries to select the ring.
2964 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2965 * which is within the range of the new packet's length is chosen.
2966 *
2967 * The "separate ring for rx status" scheme may sound queer, but it makes
2968 * sense from a cache coherency perspective. If only the host writes
2969 * to the buffer post rings, and only the chip writes to the rx status
2970 * rings, then cache lines never move beyond shared-modified state.
2971 * If both the host and chip were to write into the same ring, cache line
2972 * eviction could occur since both entities want it in an exclusive state.
2973 */
2974static int tg3_rx(struct tg3 *tp, int budget)
2975{
2976 u32 work_mask;
Michael Chan483ba502005-04-25 15:14:03 -07002977 u32 sw_idx = tp->rx_rcb_ptr;
2978 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 int received;
2980
2981 hw_idx = tp->hw_status->idx[0].rx_producer;
2982 /*
2983 * We need to order the read of hw_idx and the read of
2984 * the opaque cookie.
2985 */
2986 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 work_mask = 0;
2988 received = 0;
2989 while (sw_idx != hw_idx && budget > 0) {
2990 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2991 unsigned int len;
2992 struct sk_buff *skb;
2993 dma_addr_t dma_addr;
2994 u32 opaque_key, desc_idx, *post_ptr;
2995
2996 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2997 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2998 if (opaque_key == RXD_OPAQUE_RING_STD) {
2999 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3000 mapping);
3001 skb = tp->rx_std_buffers[desc_idx].skb;
3002 post_ptr = &tp->rx_std_ptr;
3003 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3004 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3005 mapping);
3006 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3007 post_ptr = &tp->rx_jumbo_ptr;
3008 }
3009 else {
3010 goto next_pkt_nopost;
3011 }
3012
3013 work_mask |= opaque_key;
3014
3015 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3016 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3017 drop_it:
3018 tg3_recycle_rx(tp, opaque_key,
3019 desc_idx, *post_ptr);
3020 drop_it_no_recycle:
3021			/* Other statistics are kept track of by the card. */
3022 tp->net_stats.rx_dropped++;
3023 goto next_pkt;
3024 }
3025
3026 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3027
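		/* Large frames are passed up in the original buffer and a
		 * replacement skb is posted; small frames are copied into a
		 * fresh skb so the original buffer can be recycled in place.
		 */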
3028 if (len > RX_COPY_THRESHOLD
3029 && tp->rx_offset == 2
3030 /* rx_offset != 2 iff this is a 5701 card running
3031 * in PCI-X mode [see tg3_get_invariants()] */
3032 ) {
3033 int skb_size;
3034
3035 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3036 desc_idx, *post_ptr);
3037 if (skb_size < 0)
3038 goto drop_it;
3039
3040 pci_unmap_single(tp->pdev, dma_addr,
3041 skb_size - tp->rx_offset,
3042 PCI_DMA_FROMDEVICE);
3043
3044 skb_put(skb, len);
3045 } else {
3046 struct sk_buff *copy_skb;
3047
3048 tg3_recycle_rx(tp, opaque_key,
3049 desc_idx, *post_ptr);
3050
3051 copy_skb = dev_alloc_skb(len + 2);
3052 if (copy_skb == NULL)
3053 goto drop_it_no_recycle;
3054
3055 copy_skb->dev = tp->dev;
3056 skb_reserve(copy_skb, 2);
3057 skb_put(copy_skb, len);
3058 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3059 memcpy(copy_skb->data, skb->data, len);
3060 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3061
3062 /* We'll reuse the original ring buffer. */
3063 skb = copy_skb;
3064 }
3065
3066 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3067 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3068 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3069 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3070 skb->ip_summed = CHECKSUM_UNNECESSARY;
3071 else
3072 skb->ip_summed = CHECKSUM_NONE;
3073
3074 skb->protocol = eth_type_trans(skb, tp->dev);
3075#if TG3_VLAN_TAG_USED
3076 if (tp->vlgrp != NULL &&
3077 desc->type_flags & RXD_FLAG_VLAN) {
3078 tg3_vlan_rx(tp, skb,
3079 desc->err_vlan & RXD_VLAN_MASK);
3080 } else
3081#endif
3082 netif_receive_skb(skb);
3083
3084 tp->dev->last_rx = jiffies;
3085 received++;
3086 budget--;
3087
3088next_pkt:
3089 (*post_ptr)++;
3090next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003091 sw_idx++;
3092 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
Michael Chan52f6d692005-04-25 15:14:32 -07003093
3094 /* Refresh hw_idx to see if there is new work */
3095 if (sw_idx == hw_idx) {
3096 hw_idx = tp->hw_status->idx[0].rx_producer;
3097 rmb();
3098 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 }
3100
3101 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003102 tp->rx_rcb_ptr = sw_idx;
3103 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104
3105 /* Refill RX ring(s). */
3106 if (work_mask & RXD_OPAQUE_RING_STD) {
3107 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3108 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3109 sw_idx);
3110 }
3111 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3112 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3113 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3114 sw_idx);
3115 }
3116 mmiowb();
3117
3118 return received;
3119}
3120
3121static int tg3_poll(struct net_device *netdev, int *budget)
3122{
3123 struct tg3 *tp = netdev_priv(netdev);
3124 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 int done;
3126
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 /* handle link change and other phy events */
3128 if (!(tp->tg3_flags &
3129 (TG3_FLAG_USE_LINKCHG_REG |
3130 TG3_FLAG_POLL_SERDES))) {
3131 if (sblk->status & SD_STATUS_LINK_CHG) {
3132 sblk->status = SD_STATUS_UPDATED |
3133 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003134 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003136 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 }
3138 }
3139
3140 /* run TX completion thread */
3141 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3142 spin_lock(&tp->tx_lock);
3143 tg3_tx(tp);
3144 spin_unlock(&tp->tx_lock);
3145 }
3146
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147 /* run RX thread, within the bounds set by NAPI.
3148 * All RX "locking" is done by ensuring outside
3149 * code synchronizes with dev->poll()
3150 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3152 int orig_budget = *budget;
3153 int work_done;
3154
3155 if (orig_budget > netdev->quota)
3156 orig_budget = netdev->quota;
3157
3158 work_done = tg3_rx(tp, orig_budget);
3159
3160 *budget -= work_done;
3161 netdev->quota -= work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 }
3163
David S. Millerf7383c22005-05-18 22:50:53 -07003164 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3165 tp->last_tag = sblk->status_tag;
3166 rmb();
David S. Millercd024c82005-06-24 20:17:10 -07003167 sblk->status &= ~SD_STATUS_UPDATED;
David S. Millerf7383c22005-05-18 22:50:53 -07003168
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 /* if no more work, tell net stack and NIC we're done */
David S. Millerf7383c22005-05-18 22:50:53 -07003170 done = !tg3_has_work(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 if (done) {
David S. Millerf47c11e2005-06-24 20:18:35 -07003172 spin_lock(&tp->lock);
3173 netif_rx_complete(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 tg3_restart_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003175 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 }
3177
3178 return (done ? 0 : 1);
3179}
3180
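/* Tell the IRQ handlers to do no work (they check tg3_irq_sync()) and
 * wait for any handler that is already running to finish.
 */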
David S. Millerf47c11e2005-06-24 20:18:35 -07003181static void tg3_irq_quiesce(struct tg3 *tp)
3182{
3183 BUG_ON(tp->irq_sync);
3184
3185 tp->irq_sync = 1;
3186 smp_mb();
3187
3188 synchronize_irq(tp->pdev->irq);
3189}
3190
3191static inline int tg3_irq_sync(struct tg3 *tp)
3192{
3193 return tp->irq_sync;
3194}
3195
3196/* Fully shut down all tg3 driver activity elsewhere in the system.
3197 * If irq_sync is non-zero, the IRQ handler must be synchronized with
3198 * this as well.  Most of the time this is not necessary, except when
3199 * shutting down the device.
3200 */
3201static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3202{
3203 if (irq_sync)
3204 tg3_irq_quiesce(tp);
3205 spin_lock_bh(&tp->lock);
3206 spin_lock(&tp->tx_lock);
3207}
3208
3209static inline void tg3_full_unlock(struct tg3 *tp)
3210{
3211 spin_unlock(&tp->tx_lock);
3212 spin_unlock_bh(&tp->lock);
3213}
3214
Michael Chan88b06bc22005-04-21 17:13:25 -07003215/* MSI ISR - No need to check for interrupt sharing and no need to
3216 * flush status block and interrupt mailbox. PCI ordering rules
3217 * guarantee that MSI will arrive after the status block.
3218 */
3219static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3220{
3221 struct net_device *dev = dev_id;
3222 struct tg3 *tp = netdev_priv(dev);
3223 struct tg3_hw_status *sblk = tp->hw_status;
Michael Chan88b06bc22005-04-21 17:13:25 -07003224
3225 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003226 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc22005-04-21 17:13:25 -07003227 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003228	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc22005-04-21 17:13:25 -07003229 * NIC to stop sending us irqs, engaging "in-intr-handler"
3230 * event coalescing.
3231 */
3232 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
David S. Millerfac9b832005-05-18 22:46:34 -07003233 tp->last_tag = sblk->status_tag;
David S. Millercd024c82005-06-24 20:17:10 -07003234 rmb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003235 if (tg3_irq_sync(tp))
3236 goto out;
Michael Chan88b06bc22005-04-21 17:13:25 -07003237 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan04237dd2005-04-25 15:17:17 -07003238 if (likely(tg3_has_work(tp)))
Michael Chan88b06bc22005-04-21 17:13:25 -07003239 netif_rx_schedule(dev); /* schedule NAPI poll */
3240 else {
David S. Millerfac9b832005-05-18 22:46:34 -07003241 /* No work, re-enable interrupts. */
Michael Chan88b06bc22005-04-21 17:13:25 -07003242 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003243 tp->last_tag << 24);
Michael Chan88b06bc22005-04-21 17:13:25 -07003244 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003245out:
Michael Chan88b06bc22005-04-21 17:13:25 -07003246 return IRQ_RETVAL(1);
3247}
3248
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3250{
3251 struct net_device *dev = dev_id;
3252 struct tg3 *tp = netdev_priv(dev);
3253 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 unsigned int handled = 1;
3255
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 /* In INTx mode, it is possible for the interrupt to arrive at
3257	 * the CPU before the status block that was posted just prior to it.
3258 * Reading the PCI State register will confirm whether the
3259 * interrupt is ours and will flush the status block.
3260 */
3261 if ((sblk->status & SD_STATUS_UPDATED) ||
3262 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3263 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003264 * Writing any value to intr-mbox-0 clears PCI INTA# and
3265 * chip-internal interrupt pending events.
3266		 * Writing non-zero to intr-mbox-0 additionally tells the
3267 * NIC to stop sending us irqs, engaging "in-intr-handler"
3268 * event coalescing.
3269 */
3270 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3271 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003272 if (tg3_irq_sync(tp))
3273 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003274 sblk->status &= ~SD_STATUS_UPDATED;
3275 if (likely(tg3_has_work(tp)))
3276 netif_rx_schedule(dev); /* schedule NAPI poll */
3277 else {
3278 /* No work, shared interrupt perhaps? re-enable
3279 * interrupts, and flush that PCI write
3280 */
3281 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3282 0x00000000);
3283 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3284 }
3285 } else { /* shared interrupt */
3286 handled = 0;
3287 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003288out:
David S. Millerfac9b832005-05-18 22:46:34 -07003289 return IRQ_RETVAL(handled);
3290}
3291
3292static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3293{
3294 struct net_device *dev = dev_id;
3295 struct tg3 *tp = netdev_priv(dev);
3296 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003297 unsigned int handled = 1;
3298
David S. Millerfac9b832005-05-18 22:46:34 -07003299 /* In INTx mode, it is possible for the interrupt to arrive at
3300	 * the CPU before the status block that was posted just prior to it.
3301 * Reading the PCI State register will confirm whether the
3302 * interrupt is ours and will flush the status block.
3303 */
3304 if ((sblk->status & SD_STATUS_UPDATED) ||
3305 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3306 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 * writing any value to intr-mbox-0 clears PCI INTA# and
3308 * chip-internal interrupt pending events.
3309		 * writing non-zero to intr-mbox-0 additionally tells the
3310 * NIC to stop sending us irqs, engaging "in-intr-handler"
3311 * event coalescing.
3312 */
3313 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3314 0x00000001);
David S. Millerfac9b832005-05-18 22:46:34 -07003315 tp->last_tag = sblk->status_tag;
David S. Millercd024c82005-06-24 20:17:10 -07003316 rmb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003317 if (tg3_irq_sync(tp))
3318 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan04237dd2005-04-25 15:17:17 -07003320 if (likely(tg3_has_work(tp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 netif_rx_schedule(dev); /* schedule NAPI poll */
3322 else {
3323 /* no work, shared interrupt perhaps? re-enable
3324 * interrupts, and flush that PCI write
3325 */
3326 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003327 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3329 }
3330 } else { /* shared interrupt */
3331 handled = 0;
3332 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003333out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 return IRQ_RETVAL(handled);
3335}
3336
Michael Chan79381092005-04-21 17:13:59 -07003337/* ISR for interrupt test */
3338static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3339 struct pt_regs *regs)
3340{
3341 struct net_device *dev = dev_id;
3342 struct tg3 *tp = netdev_priv(dev);
3343 struct tg3_hw_status *sblk = tp->hw_status;
3344
3345 if (sblk->status & SD_STATUS_UPDATED) {
3346 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3347 0x00000001);
3348 return IRQ_RETVAL(1);
3349 }
3350 return IRQ_RETVAL(0);
3351}
3352
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353static int tg3_init_hw(struct tg3 *);
Michael Chan944d9802005-05-29 14:57:48 -07003354static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
3356#ifdef CONFIG_NET_POLL_CONTROLLER
3357static void tg3_poll_controller(struct net_device *dev)
3358{
Michael Chan88b06bc22005-04-21 17:13:25 -07003359 struct tg3 *tp = netdev_priv(dev);
3360
3361 tg3_interrupt(tp->pdev->irq, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362}
3363#endif
3364
3365static void tg3_reset_task(void *_data)
3366{
3367 struct tg3 *tp = _data;
3368 unsigned int restart_timer;
3369
3370 tg3_netif_stop(tp);
3371
David S. Millerf47c11e2005-06-24 20:18:35 -07003372 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373
3374 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3375 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3376
Michael Chan944d9802005-05-29 14:57:48 -07003377 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 tg3_init_hw(tp);
3379
3380 tg3_netif_start(tp);
3381
David S. Millerf47c11e2005-06-24 20:18:35 -07003382 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
3384 if (restart_timer)
3385 mod_timer(&tp->timer, jiffies + 1);
3386}
3387
3388static void tg3_tx_timeout(struct net_device *dev)
3389{
3390 struct tg3 *tp = netdev_priv(dev);
3391
3392 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3393 dev->name);
3394
3395 schedule_work(&tp->reset_task);
3396}
3397
3398static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3399
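/* Work around the 4GB DMA boundary bug by copying the offending skb
 * into a new linear skb and redoing the descriptor and unmap
 * bookkeeping for the affected ring entries.
 */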
3400static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3401 u32 guilty_entry, int guilty_len,
3402 u32 last_plus_one, u32 *start, u32 mss)
3403{
3404 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3405 dma_addr_t new_addr;
3406 u32 entry = *start;
3407 int i;
3408
3409 if (!new_skb) {
3410 dev_kfree_skb(skb);
3411 return -1;
3412 }
3413
3414 /* New SKB is guaranteed to be linear. */
3415 entry = *start;
3416 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3417 PCI_DMA_TODEVICE);
3418 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3419 (skb->ip_summed == CHECKSUM_HW) ?
3420 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3421 *start = NEXT_TX(entry);
3422
3423 /* Now clean up the sw ring entries. */
3424 i = 0;
3425 while (entry != last_plus_one) {
3426 int len;
3427
3428 if (i == 0)
3429 len = skb_headlen(skb);
3430 else
3431 len = skb_shinfo(skb)->frags[i-1].size;
3432 pci_unmap_single(tp->pdev,
3433 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3434 len, PCI_DMA_TODEVICE);
3435 if (i == 0) {
3436 tp->tx_buffers[entry].skb = new_skb;
3437 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3438 } else {
3439 tp->tx_buffers[entry].skb = NULL;
3440 }
3441 entry = NEXT_TX(entry);
3442 i++;
3443 }
3444
3445 dev_kfree_skb(skb);
3446
3447 return 0;
3448}
3449
3450static void tg3_set_txd(struct tg3 *tp, int entry,
3451 dma_addr_t mapping, int len, u32 flags,
3452 u32 mss_and_is_end)
3453{
3454 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3455 int is_end = (mss_and_is_end & 0x1);
3456 u32 mss = (mss_and_is_end >> 1);
3457 u32 vlan_tag = 0;
3458
3459 if (is_end)
3460 flags |= TXD_FLAG_END;
3461 if (flags & TXD_FLAG_VLAN) {
3462 vlan_tag = flags >> 16;
3463 flags &= 0xffff;
3464 }
3465 vlan_tag |= (mss << TXD_MSS_SHIFT);
3466
3467 txd->addr_hi = ((u64) mapping >> 32);
3468 txd->addr_lo = ((u64) mapping & 0xffffffff);
3469 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3470 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3471}
3472
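/* Test whether a DMA mapping would cross a 4GB boundary, which
 * triggers the hardware bug handled by tigon3_4gb_hwbug_workaround().
 */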
3473static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3474{
3475 u32 base = (u32) mapping & 0xffffffff;
3476
3477 return ((base > 0xffffdcc0) &&
3478 (base + len + 8 < base));
3479}
3480
3481static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3482{
3483 struct tg3 *tp = netdev_priv(dev);
3484 dma_addr_t mapping;
3485 unsigned int i;
3486 u32 len, entry, base_flags, mss;
3487 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
3489 len = skb_headlen(skb);
3490
3491 /* No BH disabling for tx_lock here. We are running in BH disabled
3492 * context and TX reclaim runs via tp->poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07003493 * interrupt. Furthermore, IRQ processing runs lockless so we have
3494 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 */
David S. Millerf47c11e2005-06-24 20:18:35 -07003496 if (!spin_trylock(&tp->tx_lock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 return NETDEV_TX_LOCKED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
3499 /* This is a hard error, log it. */
3500 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3501 netif_stop_queue(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07003502 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3504 dev->name);
3505 return NETDEV_TX_BUSY;
3506 }
3507
3508 entry = tp->tx_prod;
3509 base_flags = 0;
3510 if (skb->ip_summed == CHECKSUM_HW)
3511 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3512#if TG3_TSO_SUPPORT != 0
3513 mss = 0;
3514 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3515 (mss = skb_shinfo(skb)->tso_size) != 0) {
3516 int tcp_opt_len, ip_tcp_len;
3517
3518 if (skb_header_cloned(skb) &&
3519 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3520 dev_kfree_skb(skb);
3521 goto out_unlock;
3522 }
3523
3524 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3525 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3526
3527 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3528 TXD_FLAG_CPU_POST_DMA);
3529
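		/* Prepare headers for the TSO engine: clear the IP checksum
		 * and set the per-segment length.  Chips with hardware TSO
		 * want a zero TCP checksum; older chips need the TCP
		 * pseudo-header checksum precomputed below.
		 */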
3530 skb->nh.iph->check = 0;
3531 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3532 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3533 skb->h.th->check = 0;
3534 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3535 }
3536 else {
3537 skb->h.th->check =
3538 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3539 skb->nh.iph->daddr,
3540 0, IPPROTO_TCP, 0);
3541 }
3542
3543 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3544 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3545 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3546 int tsflags;
3547
3548 tsflags = ((skb->nh.iph->ihl - 5) +
3549 (tcp_opt_len >> 2));
3550 mss |= (tsflags << 11);
3551 }
3552 } else {
3553 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3554 int tsflags;
3555
3556 tsflags = ((skb->nh.iph->ihl - 5) +
3557 (tcp_opt_len >> 2));
3558 base_flags |= tsflags << 12;
3559 }
3560 }
3561 }
3562#else
3563 mss = 0;
3564#endif
3565#if TG3_VLAN_TAG_USED
3566 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3567 base_flags |= (TXD_FLAG_VLAN |
3568 (vlan_tx_tag_get(skb) << 16));
3569#endif
3570
3571 /* Queue skb data, a.k.a. the main skb fragment. */
3572 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3573
3574 tp->tx_buffers[entry].skb = skb;
3575 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3576
3577 would_hit_hwbug = 0;
3578
3579 if (tg3_4g_overflow_test(mapping, len))
3580 would_hit_hwbug = entry + 1;
3581
3582 tg3_set_txd(tp, entry, mapping, len, base_flags,
3583 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3584
3585 entry = NEXT_TX(entry);
3586
3587 /* Now loop through additional data fragments, and queue them. */
3588 if (skb_shinfo(skb)->nr_frags > 0) {
3589 unsigned int i, last;
3590
3591 last = skb_shinfo(skb)->nr_frags - 1;
3592 for (i = 0; i <= last; i++) {
3593 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3594
3595 len = frag->size;
3596 mapping = pci_map_page(tp->pdev,
3597 frag->page,
3598 frag->page_offset,
3599 len, PCI_DMA_TODEVICE);
3600
3601 tp->tx_buffers[entry].skb = NULL;
3602 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3603
3604 if (tg3_4g_overflow_test(mapping, len)) {
3605 /* Only one should match. */
3606 if (would_hit_hwbug)
3607 BUG();
3608 would_hit_hwbug = entry + 1;
3609 }
3610
3611 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3612 tg3_set_txd(tp, entry, mapping, len,
3613 base_flags, (i == last)|(mss << 1));
3614 else
3615 tg3_set_txd(tp, entry, mapping, len,
3616 base_flags, (i == last));
3617
3618 entry = NEXT_TX(entry);
3619 }
3620 }
3621
3622 if (would_hit_hwbug) {
3623 u32 last_plus_one = entry;
3624 u32 start;
3625 unsigned int len = 0;
3626
3627 would_hit_hwbug -= 1;
3628 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3629 entry &= (TG3_TX_RING_SIZE - 1);
3630 start = entry;
3631 i = 0;
3632 while (entry != last_plus_one) {
3633 if (i == 0)
3634 len = skb_headlen(skb);
3635 else
3636 len = skb_shinfo(skb)->frags[i-1].size;
3637
3638 if (entry == would_hit_hwbug)
3639 break;
3640
3641 i++;
3642 entry = NEXT_TX(entry);
3643
3644 }
3645
3646 /* If the workaround fails due to memory/mapping
3647 * failure, silently drop this packet.
3648 */
3649 if (tigon3_4gb_hwbug_workaround(tp, skb,
3650 entry, len,
3651 last_plus_one,
3652 &start, mss))
3653 goto out_unlock;
3654
3655 entry = start;
3656 }
3657
3658	/* Packets are ready, update the Tx producer index locally and on the card. */
3659 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3660
3661 tp->tx_prod = entry;
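	/* A following packet may need as many as MAX_SKB_FRAGS + 1
	 * descriptors (one per fragment plus the linear head), so stop the
	 * queue while fewer than that remain; the tx completion path is
	 * expected to wake it once descriptors are reclaimed.
	 */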
3662 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3663 netif_stop_queue(dev);
3664
3665out_unlock:
3666 mmiowb();
3667	spin_unlock(&tp->tx_lock);
3668
3669 dev->trans_start = jiffies;
3670
3671 return NETDEV_TX_OK;
3672}
3673
3674static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3675 int new_mtu)
3676{
3677 dev->mtu = new_mtu;
3678
3679	if (new_mtu > ETH_DATA_LEN) {
3680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3681 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3682 ethtool_op_set_tso(dev, 0);
3683 }
3684 else
3685 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3686 } else {
3687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3688 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3689		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3690	}
3691}
3692
3693static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3694{
3695 struct tg3 *tp = netdev_priv(dev);
3696
3697 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3698 return -EINVAL;
3699
3700 if (!netif_running(dev)) {
3701 /* We'll just catch it later when the
3702		 * device is brought up.
3703 */
3704 tg3_set_mtu(dev, tp, new_mtu);
3705 return 0;
3706 }
3707
3708 tg3_netif_stop(tp);
3709
3710 tg3_full_lock(tp, 1);
3711
3712	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3713
3714 tg3_set_mtu(dev, tp, new_mtu);
3715
3716 tg3_init_hw(tp);
3717
3718 tg3_netif_start(tp);
3719
3720	tg3_full_unlock(tp);
3721
3722 return 0;
3723}
3724
3725/* Free up pending packets in all rx/tx rings.
3726 *
3727 * The chip has been shut down and the driver detached from
3728 * the network stack, so no interrupts or new tx packets will
3729 * end up in the driver. tp->{tx,}lock is not held and we are not
3730 * in an interrupt context and thus may sleep.
3731 */
3732static void tg3_free_rings(struct tg3 *tp)
3733{
3734 struct ring_info *rxp;
3735 int i;
3736
3737 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3738 rxp = &tp->rx_std_buffers[i];
3739
3740 if (rxp->skb == NULL)
3741 continue;
3742 pci_unmap_single(tp->pdev,
3743 pci_unmap_addr(rxp, mapping),
3744				 tp->rx_pkt_buf_sz - tp->rx_offset,
3745				 PCI_DMA_FROMDEVICE);
3746 dev_kfree_skb_any(rxp->skb);
3747 rxp->skb = NULL;
3748 }
3749
3750 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3751 rxp = &tp->rx_jumbo_buffers[i];
3752
3753 if (rxp->skb == NULL)
3754 continue;
3755 pci_unmap_single(tp->pdev,
3756 pci_unmap_addr(rxp, mapping),
3757 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3758 PCI_DMA_FROMDEVICE);
3759 dev_kfree_skb_any(rxp->skb);
3760 rxp->skb = NULL;
3761 }
3762
3763 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3764 struct tx_ring_info *txp;
3765 struct sk_buff *skb;
3766 int j;
3767
3768 txp = &tp->tx_buffers[i];
3769 skb = txp->skb;
3770
3771 if (skb == NULL) {
3772 i++;
3773 continue;
3774 }
3775
3776 pci_unmap_single(tp->pdev,
3777 pci_unmap_addr(txp, mapping),
3778 skb_headlen(skb),
3779 PCI_DMA_TODEVICE);
3780 txp->skb = NULL;
3781
3782 i++;
3783
3784 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3785 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3786 pci_unmap_page(tp->pdev,
3787 pci_unmap_addr(txp, mapping),
3788 skb_shinfo(skb)->frags[j].size,
3789 PCI_DMA_TODEVICE);
3790 i++;
3791 }
3792
3793 dev_kfree_skb_any(skb);
3794 }
3795}
3796
3797/* Initialize tx/rx rings for packet processing.
3798 *
3799 * The chip has been shut down and the driver detached from
3800 * the network stack, so no interrupts or new tx packets will
3801 * end up in the driver. tp->{tx,}lock are held and thus
3802 * we may not sleep.
3803 */
3804static void tg3_init_rings(struct tg3 *tp)
3805{
3806 u32 i;
3807
3808 /* Free up all the SKBs. */
3809 tg3_free_rings(tp);
3810
3811 /* Zero out all descriptors. */
3812 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3813 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3814 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3815 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3816
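	/* The 5780 never enables the jumbo ring (see tg3_set_mtu() above),
	 * so a jumbo MTU on that chip is handled by refilling the standard
	 * ring with jumbo-sized buffers instead.
	 */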
3817	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3818 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3819 (tp->dev->mtu > ETH_DATA_LEN))
3820 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3821
3822	/* Initialize invariants of the rings; we only set this
3823 * stuff once. This works because the card does not
3824 * write into the rx buffer posting rings.
3825 */
3826 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3827 struct tg3_rx_buffer_desc *rxd;
3828
3829 rxd = &tp->rx_std[i];
3830		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3831			<< RXD_LEN_SHIFT;
3832 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3833 rxd->opaque = (RXD_OPAQUE_RING_STD |
3834 (i << RXD_OPAQUE_INDEX_SHIFT));
3835 }
3836
3837	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3838		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3839 struct tg3_rx_buffer_desc *rxd;
3840
3841 rxd = &tp->rx_jumbo[i];
3842 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3843 << RXD_LEN_SHIFT;
3844 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3845 RXD_FLAG_JUMBO;
3846 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3847 (i << RXD_OPAQUE_INDEX_SHIFT));
3848 }
3849 }
3850
3851 /* Now allocate fresh SKBs for each rx ring. */
3852 for (i = 0; i < tp->rx_pending; i++) {
3853 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3854 -1, i) < 0)
3855 break;
3856 }
3857
3858	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3859		for (i = 0; i < tp->rx_jumbo_pending; i++) {
3860 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3861 -1, i) < 0)
3862 break;
3863 }
3864 }
3865}
3866
3867/*
3868 * Must not be invoked with interrupt sources disabled and
3869 * the hardware shut down.
3870 */
3871static void tg3_free_consistent(struct tg3 *tp)
3872{
3873 if (tp->rx_std_buffers) {
3874 kfree(tp->rx_std_buffers);
3875 tp->rx_std_buffers = NULL;
3876 }
3877 if (tp->rx_std) {
3878 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3879 tp->rx_std, tp->rx_std_mapping);
3880 tp->rx_std = NULL;
3881 }
3882 if (tp->rx_jumbo) {
3883 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3884 tp->rx_jumbo, tp->rx_jumbo_mapping);
3885 tp->rx_jumbo = NULL;
3886 }
3887 if (tp->rx_rcb) {
3888 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3889 tp->rx_rcb, tp->rx_rcb_mapping);
3890 tp->rx_rcb = NULL;
3891 }
3892 if (tp->tx_ring) {
3893 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3894 tp->tx_ring, tp->tx_desc_mapping);
3895 tp->tx_ring = NULL;
3896 }
3897 if (tp->hw_status) {
3898 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3899 tp->hw_status, tp->status_mapping);
3900 tp->hw_status = NULL;
3901 }
3902 if (tp->hw_stats) {
3903 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3904 tp->hw_stats, tp->stats_mapping);
3905 tp->hw_stats = NULL;
3906 }
3907}
3908
3909/*
3910 * Must not be invoked with interrupt sources disabled and
3911 * the hardware shut down. Can sleep.
3912 */
3913static int tg3_alloc_consistent(struct tg3 *tp)
3914{
3915 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3916 (TG3_RX_RING_SIZE +
3917 TG3_RX_JUMBO_RING_SIZE)) +
3918 (sizeof(struct tx_ring_info) *
3919 TG3_TX_RING_SIZE),
3920 GFP_KERNEL);
3921 if (!tp->rx_std_buffers)
3922 return -ENOMEM;
3923
3924 memset(tp->rx_std_buffers, 0,
3925 (sizeof(struct ring_info) *
3926 (TG3_RX_RING_SIZE +
3927 TG3_RX_JUMBO_RING_SIZE)) +
3928 (sizeof(struct tx_ring_info) *
3929 TG3_TX_RING_SIZE));
3930
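	/* The single allocation above holds the std rx, jumbo rx and tx
	 * bookkeeping arrays back to back; carve the jumbo and tx pointers
	 * out of it instead of allocating them separately.
	 */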
3931 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3932 tp->tx_buffers = (struct tx_ring_info *)
3933 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3934
3935 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3936 &tp->rx_std_mapping);
3937 if (!tp->rx_std)
3938 goto err_out;
3939
3940 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3941 &tp->rx_jumbo_mapping);
3942
3943 if (!tp->rx_jumbo)
3944 goto err_out;
3945
3946 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3947 &tp->rx_rcb_mapping);
3948 if (!tp->rx_rcb)
3949 goto err_out;
3950
3951 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3952 &tp->tx_desc_mapping);
3953 if (!tp->tx_ring)
3954 goto err_out;
3955
3956 tp->hw_status = pci_alloc_consistent(tp->pdev,
3957 TG3_HW_STATUS_SIZE,
3958 &tp->status_mapping);
3959 if (!tp->hw_status)
3960 goto err_out;
3961
3962 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3963 sizeof(struct tg3_hw_stats),
3964 &tp->stats_mapping);
3965 if (!tp->hw_stats)
3966 goto err_out;
3967
3968 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3969 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3970
3971 return 0;
3972
3973err_out:
3974 tg3_free_consistent(tp);
3975 return -ENOMEM;
3976}
3977
3978#define MAX_WAIT_CNT 1000
3979
3980/* To stop a block, clear the enable bit and poll till it
3981 * clears. tp->lock is held.
3982 */
3983static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3984{
3985 unsigned int i;
3986 u32 val;
3987
3988 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3989 switch (ofs) {
3990 case RCVLSC_MODE:
3991 case DMAC_MODE:
3992 case MBFREE_MODE:
3993 case BUFMGR_MODE:
3994 case MEMARB_MODE:
3995 /* We can't enable/disable these bits of the
3996 * 5705/5750, just say success.
3997 */
3998 return 0;
3999
4000 default:
4001 break;
4002 };
4003 }
4004
4005 val = tr32(ofs);
4006 val &= ~enable_bit;
4007 tw32_f(ofs, val);
4008
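	/* Poll for the enable bit to clear: up to MAX_WAIT_CNT iterations of
	 * 100us each, i.e. roughly 100ms worst case.
	 */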
4009 for (i = 0; i < MAX_WAIT_CNT; i++) {
4010 udelay(100);
4011 val = tr32(ofs);
4012 if ((val & enable_bit) == 0)
4013 break;
4014 }
4015
4016	if (i == MAX_WAIT_CNT && !silent) {
4017		printk(KERN_ERR PFX "tg3_stop_block timed out, "
4018 "ofs=%lx enable_bit=%x\n",
4019 ofs, enable_bit);
4020 return -ENODEV;
4021 }
4022
4023 return 0;
4024}
4025
4026/* tp->lock is held. */
4027static int tg3_abort_hw(struct tg3 *tp, int silent)
4028{
4029 int i, err;
4030
4031 tg3_disable_ints(tp);
4032
4033 tp->rx_mode &= ~RX_MODE_ENABLE;
4034 tw32_f(MAC_RX_MODE, tp->rx_mode);
4035 udelay(10);
4036
4037	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4038 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4039 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4040 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4041 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4042 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4043
4044	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4045 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4046 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4047 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4048 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4049 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4050 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4051
4052 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4053 tw32_f(MAC_MODE, tp->mac_mode);
4054 udelay(40);
4055
4056 tp->tx_mode &= ~TX_MODE_ENABLE;
4057 tw32_f(MAC_TX_MODE, tp->tx_mode);
4058
4059 for (i = 0; i < MAX_WAIT_CNT; i++) {
4060 udelay(100);
4061 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4062 break;
4063 }
4064 if (i >= MAX_WAIT_CNT) {
4065 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4066 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4067 tp->dev->name, tr32(MAC_TX_MODE));
4068		err |= -ENODEV;
4069	}
4070
4071	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4072	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4073 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4074
4075 tw32(FTQ_RESET, 0xffffffff);
4076 tw32(FTQ_RESET, 0x00000000);
4077
4078	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4079 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4080
4081 if (tp->hw_status)
4082 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4083 if (tp->hw_stats)
4084 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4085
4086	return err;
4087}
4088
4089/* tp->lock is held. */
4090static int tg3_nvram_lock(struct tg3 *tp)
4091{
4092 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4093 int i;
4094
4095 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4096 for (i = 0; i < 8000; i++) {
4097 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4098 break;
4099 udelay(20);
4100 }
4101 if (i == 8000)
4102 return -ENODEV;
4103 }
4104 return 0;
4105}
4106
4107/* tp->lock is held. */
4108static void tg3_nvram_unlock(struct tg3 *tp)
4109{
4110 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4111 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4112}
4113
4114/* tp->lock is held. */
4115static void tg3_enable_nvram_access(struct tg3 *tp)
4116{
4117 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4118 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4119 u32 nvaccess = tr32(NVRAM_ACCESS);
4120
4121 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4122 }
4123}
4124
4125/* tp->lock is held. */
4126static void tg3_disable_nvram_access(struct tg3 *tp)
4127{
4128 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4129 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4130 u32 nvaccess = tr32(NVRAM_ACCESS);
4131
4132 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4133 }
4134}
4135
4136/* tp->lock is held. */
4137static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4138{
4139 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4140 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4141 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4142
4143 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4144 switch (kind) {
4145 case RESET_KIND_INIT:
4146 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4147 DRV_STATE_START);
4148 break;
4149
4150 case RESET_KIND_SHUTDOWN:
4151 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4152 DRV_STATE_UNLOAD);
4153 break;
4154
4155 case RESET_KIND_SUSPEND:
4156 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4157 DRV_STATE_SUSPEND);
4158 break;
4159
4160 default:
4161 break;
4162 };
4163 }
4164}
4165
4166/* tp->lock is held. */
4167static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4168{
4169 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4170 switch (kind) {
4171 case RESET_KIND_INIT:
4172 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4173 DRV_STATE_START_DONE);
4174 break;
4175
4176 case RESET_KIND_SHUTDOWN:
4177 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4178 DRV_STATE_UNLOAD_DONE);
4179 break;
4180
4181 default:
4182 break;
4183 };
4184 }
4185}
4186
4187/* tp->lock is held. */
4188static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4189{
4190 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4191 switch (kind) {
4192 case RESET_KIND_INIT:
4193 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4194 DRV_STATE_START);
4195 break;
4196
4197 case RESET_KIND_SHUTDOWN:
4198 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4199 DRV_STATE_UNLOAD);
4200 break;
4201
4202 case RESET_KIND_SUSPEND:
4203 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4204 DRV_STATE_SUSPEND);
4205 break;
4206
4207 default:
4208 break;
4209 };
4210 }
4211}
4212
4213static void tg3_stop_fw(struct tg3 *);
4214
4215/* tp->lock is held. */
4216static int tg3_chip_reset(struct tg3 *tp)
4217{
4218 u32 val;
4219 u32 flags_save;
4220 int i;
4221
4222 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4223 tg3_nvram_lock(tp);
4224
4225 /*
4226 * We must avoid the readl() that normally takes place.
4227	 * It locks machines, causes machine checks, and does other
4228 * fun things. So, temporarily disable the 5701
4229 * hardware workaround, while we do the reset.
4230 */
4231 flags_save = tp->tg3_flags;
4232 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
4233
4234 /* do the reset */
4235 val = GRC_MISC_CFG_CORECLK_RESET;
4236
4237 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4238 if (tr32(0x7e2c) == 0x60) {
4239 tw32(0x7e2c, 0x20);
4240 }
4241 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4242 tw32(GRC_MISC_CFG, (1 << 29));
4243 val |= (1 << 29);
4244 }
4245 }
4246
4247 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4248 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4249 tw32(GRC_MISC_CFG, val);
4250
4251 /* restore 5701 hardware bug workaround flag */
4252 tp->tg3_flags = flags_save;
4253
4254 /* Unfortunately, we have to delay before the PCI read back.
4255	 * Some 575X chips will not even respond to a PCI cfg access
4256 * when the reset command is given to the chip.
4257 *
4258 * How do these hardware designers expect things to work
4259 * properly if the PCI write is posted for a long period
4260 * of time? It is always necessary to have some method by
4261	 * which a register read back can occur to push out the write
4262	 * that does the reset.
4263 *
4264 * For most tg3 variants the trick below was working.
4265 * Ho hum...
4266 */
4267 udelay(120);
4268
4269 /* Flush PCI posted writes. The normal MMIO registers
4270 * are inaccessible at this time so this is the only
4271	 * way to do this reliably (actually, this is no longer
4272 * the case, see above). I tried to use indirect
4273 * register read/write but this upset some 5701 variants.
4274 */
4275 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4276
4277 udelay(120);
4278
4279 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4280 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4281 int i;
4282 u32 cfg_val;
4283
4284 /* Wait for link training to complete. */
4285 for (i = 0; i < 5000; i++)
4286 udelay(100);
4287
4288 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4289 pci_write_config_dword(tp->pdev, 0xc4,
4290 cfg_val | (1 << 15));
4291 }
4292 /* Set PCIE max payload size and clear error status. */
4293 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4294 }
4295
4296 /* Re-enable indirect register accesses. */
4297 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4298 tp->misc_host_ctrl);
4299
4300 /* Set MAX PCI retry to zero. */
4301 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4302 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4303 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4304 val |= PCISTATE_RETRY_SAME_DMA;
4305 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4306
4307 pci_restore_state(tp->pdev);
4308
4309 /* Make sure PCI-X relaxed ordering bit is clear. */
4310 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4311 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4312 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4313
4314	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4315 u32 val;
4316
4317 /* Chip reset on 5780 will reset MSI enable bit,
4318 * so need to restore it.
4319 */
4320 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4321 u16 ctrl;
4322
4323 pci_read_config_word(tp->pdev,
4324 tp->msi_cap + PCI_MSI_FLAGS,
4325 &ctrl);
4326 pci_write_config_word(tp->pdev,
4327 tp->msi_cap + PCI_MSI_FLAGS,
4328 ctrl | PCI_MSI_FLAGS_ENABLE);
4329 val = tr32(MSGINT_MODE);
4330 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4331 }
4332
4333 val = tr32(MEMARB_MODE);
4334 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4335
4336 } else
4337 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4338
4339 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4340 tg3_stop_fw(tp);
4341 tw32(0x5000, 0x400);
4342 }
4343
4344 tw32(GRC_MODE, tp->grc_mode);
4345
4346 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4347 u32 val = tr32(0xc4);
4348
4349 tw32(0xc4, val | (1 << 15));
4350 }
4351
4352 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4354 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4355 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4356 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4357 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4358 }
4359
4360 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4361 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4362 tw32_f(MAC_MODE, tp->mac_mode);
4363	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4364 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4365 tw32_f(MAC_MODE, tp->mac_mode);
4366	} else
4367 tw32_f(MAC_MODE, 0);
4368 udelay(40);
4369
4370 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4371 /* Wait for firmware initialization to complete. */
4372 for (i = 0; i < 100000; i++) {
4373 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4374 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4375 break;
4376 udelay(10);
4377 }
4378 if (i >= 100000) {
4379 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4380 "firmware will not restart magic=%08x\n",
4381 tp->dev->name, val);
4382 return -ENODEV;
4383 }
4384 }
4385
4386 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4387 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4388 u32 val = tr32(0x7c00);
4389
4390 tw32(0x7c00, val | (1 << 25));
4391 }
4392
4393 /* Reprobe ASF enable state. */
4394 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4395 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4396 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4397 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4398 u32 nic_cfg;
4399
4400 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4401 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4402 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4403			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4404				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4405 }
4406 }
4407
4408 return 0;
4409}
4410
4411/* tp->lock is held. */
4412static void tg3_stop_fw(struct tg3 *tp)
4413{
4414 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4415 u32 val;
4416 int i;
4417
4418 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4419 val = tr32(GRC_RX_CPU_EVENT);
4420 val |= (1 << 14);
4421 tw32(GRC_RX_CPU_EVENT, val);
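		/* Bit 14 of GRC_RX_CPU_EVENT appears to act as the driver's
		 * doorbell to the firmware; the RX CPU clears it once it has
		 * consumed the FWCMD_NICDRV_PAUSE_FW command written above.
		 */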
4422
4423 /* Wait for RX cpu to ACK the event. */
4424 for (i = 0; i < 100; i++) {
4425 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4426 break;
4427 udelay(1);
4428 }
4429 }
4430}
4431
4432/* tp->lock is held. */
4433static int tg3_halt(struct tg3 *tp, int kind, int silent)
4434{
4435 int err;
4436
4437 tg3_stop_fw(tp);
4438
4439	tg3_write_sig_pre_reset(tp, kind);
4440
4441	tg3_abort_hw(tp, silent);
4442	err = tg3_chip_reset(tp);
4443
4444	tg3_write_sig_legacy(tp, kind);
4445 tg3_write_sig_post_reset(tp, kind);
4446
4447 if (err)
4448 return err;
4449
4450 return 0;
4451}
4452
4453#define TG3_FW_RELEASE_MAJOR 0x0
4454#define TG3_FW_RELASE_MINOR 0x0
4455#define TG3_FW_RELEASE_FIX 0x0
4456#define TG3_FW_START_ADDR 0x08000000
4457#define TG3_FW_TEXT_ADDR 0x08000000
4458#define TG3_FW_TEXT_LEN 0x9c0
4459#define TG3_FW_RODATA_ADDR 0x080009c0
4460#define TG3_FW_RODATA_LEN 0x60
4461#define TG3_FW_DATA_ADDR 0x08000a40
4462#define TG3_FW_DATA_LEN 0x20
4463#define TG3_FW_SBSS_ADDR 0x08000a60
4464#define TG3_FW_SBSS_LEN 0xc
4465#define TG3_FW_BSS_ADDR 0x08000a70
4466#define TG3_FW_BSS_LEN 0x10
4467
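/* 5701 A0 firmware fix image used by tg3_load_5701_a0_firmware_fix() below.
 * The *_ADDR values above are the firmware's link-time addresses;
 * tg3_load_firmware_cpu() keeps only their low 16 bits when writing each
 * section into the CPU scratch memory.
 */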
4468static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4469 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4470 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4471 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4472 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4473 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4474 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4475 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4476 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4477 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4478 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4479 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4480 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4481 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4482 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4483 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4484 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4485 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4486 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4487 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4488 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4489 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4490 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4491 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4492 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4493 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4494 0, 0, 0, 0, 0, 0,
4495 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4496 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4497 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4498 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4499 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4500 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4501 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4502 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4503 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4504 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4505 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4506 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4507 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4508 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4509 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4510 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4511 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4512 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4513 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4514 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4515 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4516 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4517 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4518 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4519 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4520 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4521 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4522 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4523 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4524 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4525 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4526 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4527 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4528 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4529 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4530 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4531 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4532 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4533 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4534 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4535 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4536 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4537 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4538 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4539 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4540 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4541 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4542 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4543 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4544 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4545 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4546 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4547 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4548 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4549 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4550 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4551 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4552 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4553 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4554 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4555 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4556 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4557 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4558 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4559 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4560};
4561
4562static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4563 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4564 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4565 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4566 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4567 0x00000000
4568};
4569
4570#if 0 /* All zeros, don't eat up space with it. */
4571u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4572 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4573 0x00000000, 0x00000000, 0x00000000, 0x00000000
4574};
4575#endif
4576
4577#define RX_CPU_SCRATCH_BASE 0x30000
4578#define RX_CPU_SCRATCH_SIZE 0x04000
4579#define TX_CPU_SCRATCH_BASE 0x34000
4580#define TX_CPU_SCRATCH_SIZE 0x04000
4581
4582/* tp->lock is held. */
4583static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4584{
4585 int i;
4586
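	/* The 5705 and newer chips have no separately loadable TX CPU (see
	 * tg3_load_firmware_cpu() below), so a request to halt one here is
	 * a driver bug.
	 */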
4587 if (offset == TX_CPU_BASE &&
4588 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4589 BUG();
4590
4591 if (offset == RX_CPU_BASE) {
4592 for (i = 0; i < 10000; i++) {
4593 tw32(offset + CPU_STATE, 0xffffffff);
4594 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4595 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4596 break;
4597 }
4598
4599 tw32(offset + CPU_STATE, 0xffffffff);
4600 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4601 udelay(10);
4602 } else {
4603 for (i = 0; i < 10000; i++) {
4604 tw32(offset + CPU_STATE, 0xffffffff);
4605 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4606 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4607 break;
4608 }
4609 }
4610
4611 if (i >= 10000) {
4612 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4613 "and %s CPU\n",
4614 tp->dev->name,
4615 (offset == RX_CPU_BASE ? "RX" : "TX"));
4616 return -ENODEV;
4617 }
4618 return 0;
4619}
4620
4621struct fw_info {
4622 unsigned int text_base;
4623 unsigned int text_len;
4624 u32 *text_data;
4625 unsigned int rodata_base;
4626 unsigned int rodata_len;
4627 u32 *rodata_data;
4628 unsigned int data_base;
4629 unsigned int data_len;
4630 u32 *data_data;
4631};
4632
4633/* tp->lock is held. */
4634static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4635 int cpu_scratch_size, struct fw_info *info)
4636{
4637 int err, i;
4638 u32 orig_tg3_flags = tp->tg3_flags;
4639 void (*write_op)(struct tg3 *, u32, u32);
4640
4641 if (cpu_base == TX_CPU_BASE &&
4642 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4643 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4644 "TX cpu firmware on %s which is 5705.\n",
4645 tp->dev->name);
4646 return -EINVAL;
4647 }
4648
4649 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4650 write_op = tg3_write_mem;
4651 else
4652 write_op = tg3_write_indirect_reg32;
4653
4654 /* Force use of PCI config space for indirect register
4655 * write calls.
4656 */
4657 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
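	/* tp->tg3_flags is restored from orig_tg3_flags at the 'out' label
	 * once the image has been written.
	 */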
4658
4659	/* It is possible that bootcode is still loading at this point.
4660	 * Get the nvram lock before halting the cpu.
4661 */
4662 tg3_nvram_lock(tp);
4663	err = tg3_halt_cpu(tp, cpu_base);
4664	tg3_nvram_unlock(tp);
4665	if (err)
4666 goto out;
4667
4668 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4669 write_op(tp, cpu_scratch_base + i, 0);
4670 tw32(cpu_base + CPU_STATE, 0xffffffff);
4671 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4672 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4673 write_op(tp, (cpu_scratch_base +
4674 (info->text_base & 0xffff) +
4675 (i * sizeof(u32))),
4676 (info->text_data ?
4677 info->text_data[i] : 0));
4678 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4679 write_op(tp, (cpu_scratch_base +
4680 (info->rodata_base & 0xffff) +
4681 (i * sizeof(u32))),
4682 (info->rodata_data ?
4683 info->rodata_data[i] : 0));
4684 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4685 write_op(tp, (cpu_scratch_base +
4686 (info->data_base & 0xffff) +
4687 (i * sizeof(u32))),
4688 (info->data_data ?
4689 info->data_data[i] : 0));
4690
4691 err = 0;
4692
4693out:
4694 tp->tg3_flags = orig_tg3_flags;
4695 return err;
4696}
4697
4698/* tp->lock is held. */
4699static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4700{
4701 struct fw_info info;
4702 int err, i;
4703
4704 info.text_base = TG3_FW_TEXT_ADDR;
4705 info.text_len = TG3_FW_TEXT_LEN;
4706 info.text_data = &tg3FwText[0];
4707 info.rodata_base = TG3_FW_RODATA_ADDR;
4708 info.rodata_len = TG3_FW_RODATA_LEN;
4709 info.rodata_data = &tg3FwRodata[0];
4710 info.data_base = TG3_FW_DATA_ADDR;
4711 info.data_len = TG3_FW_DATA_LEN;
4712 info.data_data = NULL;
4713
4714 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4715 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4716 &info);
4717 if (err)
4718 return err;
4719
4720 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4721 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4722 &info);
4723 if (err)
4724 return err;
4725
4726 /* Now startup only the RX cpu. */
4727 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4728 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4729
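	/* Give the RX CPU up to five attempts, 1ms apart, to latch the new
	 * program counter before declaring the firmware load a failure.
	 */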
4730 for (i = 0; i < 5; i++) {
4731 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4732 break;
4733 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4734 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4735 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4736 udelay(1000);
4737 }
4738 if (i >= 5) {
4739 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4740 "to set RX CPU PC, is %08x should be %08x\n",
4741 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4742 TG3_FW_TEXT_ADDR);
4743 return -ENODEV;
4744 }
4745 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4746 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4747
4748 return 0;
4749}
4750
4751#if TG3_TSO_SUPPORT != 0
4752
4753#define TG3_TSO_FW_RELEASE_MAJOR 0x1
4754#define TG3_TSO_FW_RELASE_MINOR 0x6
4755#define TG3_TSO_FW_RELEASE_FIX 0x0
4756#define TG3_TSO_FW_START_ADDR 0x08000000
4757#define TG3_TSO_FW_TEXT_ADDR 0x08000000
4758#define TG3_TSO_FW_TEXT_LEN 0x1aa0
4759#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4760#define TG3_TSO_FW_RODATA_LEN 0x60
4761#define TG3_TSO_FW_DATA_ADDR 0x08001b20
4762#define TG3_TSO_FW_DATA_LEN 0x30
4763#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4764#define TG3_TSO_FW_SBSS_LEN 0x2c
4765#define TG3_TSO_FW_BSS_ADDR 0x08001b80
4766#define TG3_TSO_FW_BSS_LEN 0x894
4767
4768static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4769 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4770 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4771 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4772 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4773 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4774 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4775 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4776 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4777 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4778 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4779 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4780 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4781 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4782 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4783 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4784 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4785 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4786 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4787 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4788 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4789 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4790 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4791 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4792 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4793 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4794 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4795 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4796 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4797 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4798 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4799 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4800 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4801 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4802 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4803 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4804 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4805 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4806 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4807 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4808 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4809 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4810 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4811 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4812 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4813 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4814 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4815 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4816 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4817 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4818 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4819 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4820 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4821 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4822 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4823 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4824 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4825 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4826 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4827 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4828 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4829 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4830 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4831 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4832 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4833 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4834 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4835 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4836 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4837 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4838 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4839 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4840 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4841 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4842 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4843 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4844 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4845 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4846 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4847 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4848 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4849 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4850 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4851 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4852 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4853 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4854 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4855 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4856 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4857 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4858 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4859 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4860 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4861 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4862 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4863 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4864 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4865 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4866 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4867 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4868 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4869 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4870 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4871 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4872 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4873 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4874 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4875 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4876 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4877 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4878 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4879 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4880 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4881 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4882 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4883 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4884 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4885 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4886 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4887 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4888 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4889 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4890 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4891 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4892 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4893 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4894 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4895 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4896 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4897 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4898 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4899 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4900 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4901 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4902 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4903 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4904 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4905 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4906 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4907 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4908 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4909 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4910 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4911 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4912 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4913 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4914 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4915 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4916 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4917 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4918 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4919 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4920 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4921 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4922 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4923 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4924 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4925 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4926 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4927 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4928 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4929 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4930 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4931 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4932 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4933 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4934 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4935 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4936 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4937 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4938 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4939 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4940 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4941 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4942 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4943 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4944 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4945 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4946 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4947 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4948 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4949 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4950 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4951 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4952 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4953 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4954 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4955 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4956 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4957 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4958 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4959 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4960 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4961 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4962 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4963 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4964 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4965 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4966 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4967 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4968 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4969 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4970 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4971 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4972 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4973 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4974 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4975 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4976 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4977 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4978 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4979 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4980 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4981 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4982 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4983 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4984 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4985 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4986 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4987 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4988 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4989 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4990 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4991 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4992 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4993 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4994 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4995 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4996 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4997 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4998 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4999 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5000 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5001 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5002 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5003 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5004 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5005 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5006 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5007 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5008 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5009 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5010 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5011 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5012 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5013 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5014 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5015 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5016 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5017 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5018 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5019 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5020 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5021 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5022 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5023 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5024 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5025 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5026 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5027 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5028 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5029 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5030 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5031 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5032 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5033 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5034 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5035 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5036 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5037 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5038 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5039 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5040 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5041 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5042 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5043 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5044 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5045 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5046 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5047 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5048 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5049 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5050 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5051 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5052 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5053};
5054
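/* The TSO firmware rodata and data blobs below are not executable code;
 * they hold the firmware's string constants encoded as 32-bit ASCII words
 * (for example 0x4d61696e 0x43707542 spells "MainCpuB", and the data
 * section carries the version tag "stkoffld_v1.6.0").
 */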
5055static u32 tg3TsoFwRodata[] = {
5056 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5057 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5058 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5059 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5060 0x00000000,
5061};
5062
5063static u32 tg3TsoFwData[] = {
5064 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5065 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5066 0x00000000,
5067};
5068
5069/* 5705 needs a special version of the TSO firmware. */
5070#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5071#define TG3_TSO5_FW_RELEASE_MINOR	0x2
5072#define TG3_TSO5_FW_RELEASE_FIX 0x0
5073#define TG3_TSO5_FW_START_ADDR 0x00010000
5074#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5075#define TG3_TSO5_FW_TEXT_LEN 0xe90
5076#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5077#define TG3_TSO5_FW_RODATA_LEN 0x50
5078#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5079#define TG3_TSO5_FW_DATA_LEN 0x20
5080#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5081#define TG3_TSO5_FW_SBSS_LEN 0x28
5082#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5083#define TG3_TSO5_FW_BSS_LEN 0x88
5084
5085static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5086 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5087 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5088 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5089 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5090 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5091 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5092 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5093 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5094 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5095 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5096 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5097 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5098 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5099 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5100 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5101 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5102 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5103 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5104 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5105 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5106 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5107 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5108 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5109 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5110 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5111 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5112 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5113 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5114 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5115 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5116 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5117 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5118 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5119 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5120 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5121 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5122 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5123 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5124 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5125 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5126 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5127 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5128 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5129 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5130 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5131 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5132 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5133 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5134 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5135 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5136 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5137 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5138 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5139 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5140 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5141 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5142 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5143 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5144 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5145 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5146 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5147 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5148 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5149 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5150 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5151 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5152 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5153 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5154 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5155 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5156 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5157 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5158 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5159 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5160 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5161 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5162 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5163 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5164 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5165 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5166 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5167 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5168 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5169 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5170 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5171 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5172 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5173 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5174 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5175 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5176 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5177 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5178 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5179 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5180 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5181 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5182 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5183 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5184 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5185 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5186 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5187 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5188 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5189 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5190 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5191 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5192 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5193 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5194 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5195 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5196 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5197 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5198 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5199 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5200 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5201 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5202 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5203 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5204 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5205 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5206 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5207 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5208 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5209 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5210 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5211 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5212 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5213 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5214 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5215 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5216 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5217 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5218 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5219 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5220 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5221 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5222 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5223 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5224 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5225 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5226 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5227 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5228 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5229 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5230 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5231 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5232 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5233 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5234 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5235 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5236 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5237 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5238 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5239 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5240 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5241 0x00000000, 0x00000000, 0x00000000,
5242};
5243
5244static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5245 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5246 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5247 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5248 0x00000000, 0x00000000, 0x00000000,
5249};
5250
5251static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5252 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5253 0x00000000, 0x00000000, 0x00000000,
5254};
5255
5256/* tp->lock is held. */
5257static int tg3_load_tso_firmware(struct tg3 *tp)
5258{
5259 struct fw_info info;
5260 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5261 int err, i;
5262
5263 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5264 return 0;
5265
5266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5267 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5268 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5269 info.text_data = &tg3Tso5FwText[0];
5270 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5271 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5272 info.rodata_data = &tg3Tso5FwRodata[0];
5273 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5274 info.data_len = TG3_TSO5_FW_DATA_LEN;
5275 info.data_data = &tg3Tso5FwData[0];
5276 cpu_base = RX_CPU_BASE;
5277 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5278 cpu_scratch_size = (info.text_len +
5279 info.rodata_len +
5280 info.data_len +
5281 TG3_TSO5_FW_SBSS_LEN +
5282 TG3_TSO5_FW_BSS_LEN);
5283 } else {
5284 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5285 info.text_len = TG3_TSO_FW_TEXT_LEN;
5286 info.text_data = &tg3TsoFwText[0];
5287 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5288 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5289 info.rodata_data = &tg3TsoFwRodata[0];
5290 info.data_base = TG3_TSO_FW_DATA_ADDR;
5291 info.data_len = TG3_TSO_FW_DATA_LEN;
5292 info.data_data = &tg3TsoFwData[0];
5293 cpu_base = TX_CPU_BASE;
5294 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5295 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5296 }
5297
5298 err = tg3_load_firmware_cpu(tp, cpu_base,
5299 cpu_scratch_base, cpu_scratch_size,
5300 &info);
5301 if (err)
5302 return err;
5303
5304	/* Now start up the CPU. */
5305 tw32(cpu_base + CPU_STATE, 0xffffffff);
5306 tw32_f(cpu_base + CPU_PC, info.text_base);
5307
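	/* Verify that the firmware CPU latched the new program counter;
	 * if it did not, halt the CPU and retry the write a few times
	 * before giving up.
	 */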
5308 for (i = 0; i < 5; i++) {
5309 if (tr32(cpu_base + CPU_PC) == info.text_base)
5310 break;
5311 tw32(cpu_base + CPU_STATE, 0xffffffff);
5312 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5313 tw32_f(cpu_base + CPU_PC, info.text_base);
5314 udelay(1000);
5315 }
5316 if (i >= 5) {
5317 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5318 "to set CPU PC, is %08x should be %08x\n",
5319 tp->dev->name, tr32(cpu_base + CPU_PC),
5320 info.text_base);
5321 return -ENODEV;
5322 }
5323 tw32(cpu_base + CPU_STATE, 0xffffffff);
5324 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5325 return 0;
5326}
5327
5328#endif /* TG3_TSO_SUPPORT != 0 */
5329
5330/* tp->lock is held. */
5331static void __tg3_set_mac_addr(struct tg3 *tp)
5332{
5333 u32 addr_high, addr_low;
5334 int i;
5335
5336 addr_high = ((tp->dev->dev_addr[0] << 8) |
5337 tp->dev->dev_addr[1]);
5338 addr_low = ((tp->dev->dev_addr[2] << 24) |
5339 (tp->dev->dev_addr[3] << 16) |
5340 (tp->dev->dev_addr[4] << 8) |
5341 (tp->dev->dev_addr[5] << 0));
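	/* Copy the station address into all four MAC address slots so that
	 * every address-match register (presumably used for receive
	 * filtering) holds the same value.
	 */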
5342 for (i = 0; i < 4; i++) {
5343 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5344 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5345 }
5346
5347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5349 for (i = 0; i < 12; i++) {
5350 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5351 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5352 }
5353 }
5354
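	/* Seed the transmit backoff generator from the sum of the address
	 * bytes, masked down to the width of the seed field.
	 */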
5355 addr_high = (tp->dev->dev_addr[0] +
5356 tp->dev->dev_addr[1] +
5357 tp->dev->dev_addr[2] +
5358 tp->dev->dev_addr[3] +
5359 tp->dev->dev_addr[4] +
5360 tp->dev->dev_addr[5]) &
5361 TX_BACKOFF_SEED_MASK;
5362 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5363}
5364
5365static int tg3_set_mac_addr(struct net_device *dev, void *p)
5366{
5367 struct tg3 *tp = netdev_priv(dev);
5368 struct sockaddr *addr = p;
5369
5370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5371
David S. Millerf47c11e2005-06-24 20:18:35 -07005372 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 __tg3_set_mac_addr(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005374 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375
5376 return 0;
5377}
5378
5379/* tp->lock is held. */
5380static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5381 dma_addr_t mapping, u32 maxlen_flags,
5382 u32 nic_addr)
5383{
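	/* Each TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit
	 * host DMA address, a packed maxlen/flags word and, on chips older
	 * than the 5705, the ring's location inside NIC memory.
	 */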
5384 tg3_write_mem(tp,
5385 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5386 ((u64) mapping >> 32));
5387 tg3_write_mem(tp,
5388 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5389 ((u64) mapping & 0xffffffff));
5390 tg3_write_mem(tp,
5391 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5392 maxlen_flags);
5393
5394 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5395 tg3_write_mem(tp,
5396 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5397 nic_addr);
5398}
5399
5400static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07005401static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07005402{
5403 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5404 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5405 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5406 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5407 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5408 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5409 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5410 }
5411 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5412 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5413 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5414 u32 val = ec->stats_block_coalesce_usecs;
5415
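		/* With no carrier, write 0 ticks so the periodic statistics
		 * block DMA is effectively disabled while the link is down.
		 */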
5416 if (!netif_carrier_ok(tp->dev))
5417 val = 0;
5418
5419 tw32(HOSTCC_STAT_COAL_TICKS, val);
5420 }
5421}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422
5423/* tp->lock is held. */
5424static int tg3_reset_hw(struct tg3 *tp)
5425{
5426 u32 val, rdmac_mode;
5427 int i, err, limit;
5428
5429 tg3_disable_ints(tp);
5430
5431 tg3_stop_fw(tp);
5432
5433 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5434
5435 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07005436 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437 }
5438
5439 err = tg3_chip_reset(tp);
5440 if (err)
5441 return err;
5442
5443 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5444
5445 /* This works around an issue with Athlon chipsets on
5446 * B3 tigon3 silicon. This bit has no effect on any
5447 * other revision. But do not set this on PCI Express
5448 * chips.
5449 */
5450 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5451 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5452 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5453
5454 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5455 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5456 val = tr32(TG3PCI_PCISTATE);
5457 val |= PCISTATE_RETRY_SAME_DMA;
5458 tw32(TG3PCI_PCISTATE, val);
5459 }
5460
5461 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5462 /* Enable some hw fixes. */
5463 val = tr32(TG3PCI_MSI_DATA);
5464 val |= (1 << 26) | (1 << 28) | (1 << 29);
5465 tw32(TG3PCI_MSI_DATA, val);
5466 }
5467
5468 /* Descriptor ring init may make accesses to the
5469 * NIC SRAM area to setup the TX descriptors, so we
5470 * can only do this after the hardware has been
5471 * successfully reset.
5472 */
5473 tg3_init_rings(tp);
5474
5475 /* This value is determined during the probe time DMA
5476 * engine test, tg3_test_dma.
5477 */
5478 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5479
5480 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5481 GRC_MODE_4X_NIC_SEND_RINGS |
5482 GRC_MODE_NO_TX_PHDR_CSUM |
5483 GRC_MODE_NO_RX_PHDR_CSUM);
5484 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5485 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5486 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5487 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5488 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5489
5490 tw32(GRC_MODE,
5491 tp->grc_mode |
5492 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5493
5494	/* Setup the timer prescaler register. Clock is always 66 MHz. */
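	/* 66 MHz divided by (65 + 1) presumably yields the 1 usec tick that
	 * the host coalescing timer values are specified in.
	 */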
5495 val = tr32(GRC_MISC_CFG);
5496 val &= ~0xff;
5497 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5498 tw32(GRC_MISC_CFG, val);
5499
5500 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07005501 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 /* Do nothing. */
5503 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5504 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5506 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5507 else
5508 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5509 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5510 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5511 }
5512#if TG3_TSO_SUPPORT != 0
5513 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5514 int fw_len;
5515
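		/* On the 5705 the TSO firmware is loaded at the start of the
		 * MBUF pool (see tg3_load_tso_firmware), so move the pool
		 * base past the 128-byte aligned firmware image and shrink
		 * the pool size to match.
		 */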
5516 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5517 TG3_TSO5_FW_RODATA_LEN +
5518 TG3_TSO5_FW_DATA_LEN +
5519 TG3_TSO5_FW_SBSS_LEN +
5520 TG3_TSO5_FW_BSS_LEN);
5521 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5522 tw32(BUFMGR_MB_POOL_ADDR,
5523 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5524 tw32(BUFMGR_MB_POOL_SIZE,
5525 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5526 }
5527#endif
5528
Michael Chan0f893dc2005-07-25 12:30:38 -07005529 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005530 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5531 tp->bufmgr_config.mbuf_read_dma_low_water);
5532 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5533 tp->bufmgr_config.mbuf_mac_rx_low_water);
5534 tw32(BUFMGR_MB_HIGH_WATER,
5535 tp->bufmgr_config.mbuf_high_water);
5536 } else {
5537 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5538 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5539 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5540 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5541 tw32(BUFMGR_MB_HIGH_WATER,
5542 tp->bufmgr_config.mbuf_high_water_jumbo);
5543 }
5544 tw32(BUFMGR_DMA_LOW_WATER,
5545 tp->bufmgr_config.dma_low_water);
5546 tw32(BUFMGR_DMA_HIGH_WATER,
5547 tp->bufmgr_config.dma_high_water);
5548
5549 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5550 for (i = 0; i < 2000; i++) {
5551 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5552 break;
5553 udelay(10);
5554 }
5555 if (i >= 2000) {
5556 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5557 tp->dev->name);
5558 return -ENODEV;
5559 }
5560
5561 /* Setup replenish threshold. */
5562 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5563
5564 /* Initialize TG3_BDINFO's at:
5565 * RCVDBDI_STD_BD: standard eth size rx ring
5566 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5567 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5568 *
5569 * like so:
5570 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5571 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5572 * ring attribute flags
5573 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5574 *
5575 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5576 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5577 *
5578 * The size of each ring is fixed in the firmware, but the location is
5579 * configurable.
5580 */
5581 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5582 ((u64) tp->rx_std_mapping >> 32));
5583 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5584 ((u64) tp->rx_std_mapping & 0xffffffff));
5585 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5586 NIC_SRAM_RX_BUFFER_DESC);
5587
5588 /* Don't even try to program the JUMBO/MINI buffer descriptor
5589 * configs on 5705.
5590 */
5591 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5592 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5593 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5594 } else {
5595 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5596 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5597
5598 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5599 BDINFO_FLAGS_DISABLED);
5600
5601 /* Setup replenish threshold. */
5602 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5603
Michael Chan0f893dc2005-07-25 12:30:38 -07005604 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005605 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5606 ((u64) tp->rx_jumbo_mapping >> 32));
5607 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5608 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5609 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5610 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5611 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5612 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5613 } else {
5614 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5615 BDINFO_FLAGS_DISABLED);
5616 }
5617
5618 }
5619
5620 /* There is only one send ring on 5705/5750, no need to explicitly
5621 * disable the others.
5622 */
5623 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5624 /* Clear out send RCB ring in SRAM. */
5625 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5626 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5627 BDINFO_FLAGS_DISABLED);
5628 }
5629
5630 tp->tx_prod = 0;
5631 tp->tx_cons = 0;
5632 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5633 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5634
5635 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5636 tp->tx_desc_mapping,
5637 (TG3_TX_RING_SIZE <<
5638 BDINFO_FLAGS_MAXLEN_SHIFT),
5639 NIC_SRAM_TX_BUFFER_DESC);
5640
5641 /* There is only one receive return ring on 5705/5750, no need
5642 * to explicitly disable the others.
5643 */
5644 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5645 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5646 i += TG3_BDINFO_SIZE) {
5647 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5648 BDINFO_FLAGS_DISABLED);
5649 }
5650 }
5651
5652 tp->rx_rcb_ptr = 0;
5653 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5654
5655 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5656 tp->rx_rcb_mapping,
5657 (TG3_RX_RCB_RING_SIZE(tp) <<
5658 BDINFO_FLAGS_MAXLEN_SHIFT),
5659 0);
5660
5661 tp->rx_std_ptr = tp->rx_pending;
5662 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5663 tp->rx_std_ptr);
5664
Michael Chan0f893dc2005-07-25 12:30:38 -07005665 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666 tp->rx_jumbo_pending : 0;
5667 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5668 tp->rx_jumbo_ptr);
5669
5670 /* Initialize MAC address and backoff seed. */
5671 __tg3_set_mac_addr(tp);
5672
5673 /* MTU + ethernet header + FCS + optional VLAN tag */
5674 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5675
5676 /* The slot time is changed by tg3_setup_phy if we
5677 * run at gigabit with half duplex.
5678 */
5679 tw32(MAC_TX_LENGTHS,
5680 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5681 (6 << TX_LENGTHS_IPG_SHIFT) |
5682 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5683
5684 /* Receive rules. */
5685 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5686 tw32(RCVLPC_CONFIG, 0x0181);
5687
5688 /* Calculate RDMAC_MODE setting early, we need it to determine
5689 * the RCVLPC_STATE_ENABLE mask.
5690 */
5691 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5692 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5693 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5694 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5695 RDMAC_MODE_LNGREAD_ENAB);
5696 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5697 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
Michael Chan85e94ce2005-04-21 17:05:28 -07005698
5699 /* If statement applies to 5705 and 5750 PCI devices only */
5700 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5701 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5702 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5704 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5705 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5706 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5707 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5708 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5709 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5710 }
5711 }
5712
Michael Chan85e94ce2005-04-21 17:05:28 -07005713 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5714 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5715
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716#if TG3_TSO_SUPPORT != 0
5717 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5718 rdmac_mode |= (1 << 27);
5719#endif
5720
5721 /* Receive/send statistics. */
5722 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5723 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5724 val = tr32(RCVLPC_STATS_ENABLE);
5725 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5726 tw32(RCVLPC_STATS_ENABLE, val);
5727 } else {
5728 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5729 }
5730 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5731 tw32(SNDDATAI_STATSENAB, 0xffffff);
5732 tw32(SNDDATAI_STATSCTRL,
5733 (SNDDATAI_SCTRL_ENABLE |
5734 SNDDATAI_SCTRL_FASTUPD));
5735
5736 /* Setup host coalescing engine. */
5737 tw32(HOSTCC_MODE, 0);
5738 for (i = 0; i < 2000; i++) {
5739 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5740 break;
5741 udelay(10);
5742 }
5743
Michael Chand244c892005-07-05 14:42:33 -07005744 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745
5746 /* set status block DMA address */
5747 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5748 ((u64) tp->status_mapping >> 32));
5749 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5750 ((u64) tp->status_mapping & 0xffffffff));
5751
5752 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5753 /* Status/statistics block address. See tg3_timer,
5754 * the tg3_periodic_fetch_stats call there, and
5755 * tg3_get_stats to see how this works for 5705/5750 chips.
5756 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005757 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5758 ((u64) tp->stats_mapping >> 32));
5759 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5760 ((u64) tp->stats_mapping & 0xffffffff));
5761 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5762 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5763 }
5764
5765 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5766
5767 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5768 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5769 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5770 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5771
5772 /* Clear statistics/status block in chip, and status block in ram. */
5773 for (i = NIC_SRAM_STATS_BLK;
5774 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5775 i += sizeof(u32)) {
5776 tg3_write_mem(tp, i, 0);
5777 udelay(40);
5778 }
5779 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5780
5781 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5782 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5783 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5784 udelay(40);
5785
Michael Chan314fba32005-04-21 17:07:04 -07005786 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5787 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5788 * register to preserve the GPIO settings for LOMs. The GPIOs,
5789 * whether used as inputs or outputs, are set by boot code after
5790 * reset.
5791 */
5792 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5793 u32 gpio_mask;
5794
5795 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5796 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07005797
5798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5799 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5800 GRC_LCLCTRL_GPIO_OUTPUT3;
5801
Michael Chan314fba32005-04-21 17:07:04 -07005802 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5803
5804 /* GPIO1 must be driven high for eeprom write protect */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005805 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5806 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07005807 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5809 udelay(100);
5810
5811 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07005812 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5813 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005814
5815 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5816 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5817 udelay(40);
5818 }
5819
5820 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5821 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5822 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5823 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5824 WDMAC_MODE_LNGREAD_ENAB);
5825
Michael Chan85e94ce2005-04-21 17:05:28 -07005826 /* If statement applies to 5705 and 5750 PCI devices only */
5827 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5828 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5831 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5832 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5833 /* nothing */
5834 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5835 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5836 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5837 val |= WDMAC_MODE_RX_ACCEL;
5838 }
5839 }
5840
5841 tw32_f(WDMAC_MODE, val);
5842 udelay(40);
5843
5844 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5845 val = tr32(TG3PCI_X_CAPS);
5846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5847 val &= ~PCIX_CAPS_BURST_MASK;
5848 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5849 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5850 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5851 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5852 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5853 val |= (tp->split_mode_max_reqs <<
5854 PCIX_CAPS_SPLIT_SHIFT);
5855 }
5856 tw32(TG3PCI_X_CAPS, val);
5857 }
5858
5859 tw32_f(RDMAC_MODE, rdmac_mode);
5860 udelay(40);
5861
5862 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5863 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5864 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5865 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5866 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5867 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5868 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5869 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5870#if TG3_TSO_SUPPORT != 0
5871 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5872 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5873#endif
5874 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5875 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5876
5877 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5878 err = tg3_load_5701_a0_firmware_fix(tp);
5879 if (err)
5880 return err;
5881 }
5882
5883#if TG3_TSO_SUPPORT != 0
5884 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5885 err = tg3_load_tso_firmware(tp);
5886 if (err)
5887 return err;
5888 }
5889#endif
5890
5891 tp->tx_mode = TX_MODE_ENABLE;
5892 tw32_f(MAC_TX_MODE, tp->tx_mode);
5893 udelay(100);
5894
5895 tp->rx_mode = RX_MODE_ENABLE;
5896 tw32_f(MAC_RX_MODE, tp->rx_mode);
5897 udelay(10);
5898
5899 if (tp->link_config.phy_is_low_power) {
5900 tp->link_config.phy_is_low_power = 0;
5901 tp->link_config.speed = tp->link_config.orig_speed;
5902 tp->link_config.duplex = tp->link_config.orig_duplex;
5903 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5904 }
5905
5906 tp->mi_mode = MAC_MI_MODE_BASE;
5907 tw32_f(MAC_MI_MODE, tp->mi_mode);
5908 udelay(80);
5909
5910 tw32(MAC_LED_CTRL, tp->led_ctrl);
5911
5912 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5913 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5914 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5915 udelay(10);
5916 }
5917 tw32_f(MAC_RX_MODE, tp->rx_mode);
5918 udelay(10);
5919
5920 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5921 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5922 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5923 /* Set drive transmission level to 1.2V */
5924 /* only if the signal pre-emphasis bit is not set */
5925 val = tr32(MAC_SERDES_CFG);
5926 val &= 0xfffff000;
5927 val |= 0x880;
5928 tw32(MAC_SERDES_CFG, val);
5929 }
5930 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5931 tw32(MAC_SERDES_CFG, 0x616000);
5932 }
5933
5934 /* Prevent chip from dropping frames when flow control
5935 * is enabled.
5936 */
5937 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5938
5939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5940 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5941 /* Use hardware link auto-negotiation */
5942 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5943 }
5944
5945 err = tg3_setup_phy(tp, 1);
5946 if (err)
5947 return err;
5948
5949 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5950 u32 tmp;
5951
5952 /* Clear CRC stats. */
5953 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5954 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5955 tg3_readphy(tp, 0x14, &tmp);
5956 }
5957 }
5958
5959 __tg3_set_rx_mode(tp->dev);
5960
5961 /* Initialize receive rules. */
5962 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5963 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5964 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5965 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5966
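	/* Clear the unused receive rule slots.  5705-class chips (other than
	 * the 5780) implement only 8 rule/value pairs instead of 16, and when
	 * ASF is enabled the four highest pairs are left untouched, presumably
	 * because the firmware owns them.
	 */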
Michael Chan4cf78e42005-07-25 12:29:19 -07005967 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5968 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005969 limit = 8;
5970 else
5971 limit = 16;
5972 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5973 limit -= 4;
5974 switch (limit) {
5975 case 16:
5976 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5977 case 15:
5978 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5979 case 14:
5980 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5981 case 13:
5982 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5983 case 12:
5984 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5985 case 11:
5986 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5987 case 10:
5988 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5989 case 9:
5990 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5991 case 8:
5992 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5993 case 7:
5994 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5995 case 6:
5996 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5997 case 5:
5998 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5999 case 4:
6000 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6001 case 3:
6002 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6003 case 2:
6004 case 1:
6005
6006 default:
6007 break;
6008	}
6009
6010 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6011
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012 return 0;
6013}
6014
6015/* Called at device open time to get the chip ready for
6016 * packet processing. Invoked with tp->lock held.
6017 */
6018static int tg3_init_hw(struct tg3 *tp)
6019{
6020 int err;
6021
6022 /* Force the chip into D0. */
6023 err = tg3_set_power_state(tp, 0);
6024 if (err)
6025 goto out;
6026
6027 tg3_switch_clocks(tp);
6028
6029 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6030
6031 err = tg3_reset_hw(tp);
6032
6033out:
6034 return err;
6035}
6036
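/* Fold a 32-bit hardware statistics register into a 64-bit software counter.
 * If the low word wraps while adding, carry one into the high word.
 */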
6037#define TG3_STAT_ADD32(PSTAT, REG) \
6038do { u32 __val = tr32(REG); \
6039 (PSTAT)->low += __val; \
6040 if ((PSTAT)->low < __val) \
6041 (PSTAT)->high += 1; \
6042} while (0)
6043
6044static void tg3_periodic_fetch_stats(struct tg3 *tp)
6045{
6046 struct tg3_hw_stats *sp = tp->hw_stats;
6047
6048 if (!netif_carrier_ok(tp->dev))
6049 return;
6050
6051 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6052 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6053 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6054 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6055 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6056 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6057 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6058 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6059 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6060 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6061 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6062 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6063 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6064
6065 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6066 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6067 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6068 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6069 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6070 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6071 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6072 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6073 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6074 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6075 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6076 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6077 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6078 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6079}
6080
6081static void tg3_timer(unsigned long __opaque)
6082{
6083 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084
David S. Millerf47c11e2005-06-24 20:18:35 -07006085 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086
David S. Millerfac9b832005-05-18 22:46:34 -07006087 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6088	/* All of this garbage is because, when using non-tagged
6089	 * IRQ status, the mailbox/status_block protocol the chip
6090	 * uses with the CPU is race prone.
6091 */
6092 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6093 tw32(GRC_LOCAL_CTRL,
6094 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6095 } else {
6096 tw32(HOSTCC_MODE, tp->coalesce_mode |
6097 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6098 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006099
David S. Millerfac9b832005-05-18 22:46:34 -07006100 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6101 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07006102 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07006103 schedule_work(&tp->reset_task);
6104 return;
6105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006106 }
6107
Linus Torvalds1da177e2005-04-16 15:20:36 -07006108 /* This part only runs once per second. */
6109 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07006110 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6111 tg3_periodic_fetch_stats(tp);
6112
Linus Torvalds1da177e2005-04-16 15:20:36 -07006113 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6114 u32 mac_stat;
6115 int phy_event;
6116
6117 mac_stat = tr32(MAC_STATUS);
6118
6119 phy_event = 0;
6120 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6121 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6122 phy_event = 1;
6123 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6124 phy_event = 1;
6125
6126 if (phy_event)
6127 tg3_setup_phy(tp, 0);
6128 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6129 u32 mac_stat = tr32(MAC_STATUS);
6130 int need_setup = 0;
6131
6132 if (netif_carrier_ok(tp->dev) &&
6133 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6134 need_setup = 1;
6135 }
6136		if (!netif_carrier_ok(tp->dev) &&
6137 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6138 MAC_STATUS_SIGNAL_DET))) {
6139 need_setup = 1;
6140 }
6141 if (need_setup) {
6142 tw32_f(MAC_MODE,
6143 (tp->mac_mode &
6144 ~MAC_MODE_PORT_MODE_MASK));
6145 udelay(40);
6146 tw32_f(MAC_MODE, tp->mac_mode);
6147 udelay(40);
6148 tg3_setup_phy(tp, 0);
6149 }
Michael Chan747e8f82005-07-25 12:33:22 -07006150 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6151 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006152
6153 tp->timer_counter = tp->timer_multiplier;
6154 }
6155
6156 /* Heartbeat is only sent once every 120 seconds. */
6157 if (!--tp->asf_counter) {
6158 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6159 u32 val;
6160
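			/* Tell the ASF firmware the driver is still alive:
			 * post the NICDRV_ALIVE command with a 4-byte payload,
			 * then set bit 14 of the RX CPU event register, which
			 * presumably raises the firmware mailbox interrupt.
			 */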
6161 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6162 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6163 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6164 val = tr32(GRC_RX_CPU_EVENT);
6165 val |= (1 << 14);
6166 tw32(GRC_RX_CPU_EVENT, val);
6167 }
6168 tp->asf_counter = tp->asf_multiplier;
6169 }
6170
David S. Millerf47c11e2005-06-24 20:18:35 -07006171 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172
6173 tp->timer.expires = jiffies + tp->timer_offset;
6174 add_timer(&tp->timer);
6175}
6176
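/* Check that the board can actually raise an interrupt: temporarily install
 * tg3_test_isr, force a "coalesce now" event, and poll the interrupt mailbox
 * to see whether it becomes non-zero before restoring the normal handler.
 */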
Michael Chan79381092005-04-21 17:13:59 -07006177static int tg3_test_interrupt(struct tg3 *tp)
6178{
6179 struct net_device *dev = tp->dev;
6180 int err, i;
6181 u32 int_mbox = 0;
6182
Michael Chand4bc3922005-05-29 14:59:20 -07006183 if (!netif_running(dev))
6184 return -ENODEV;
6185
Michael Chan79381092005-04-21 17:13:59 -07006186 tg3_disable_ints(tp);
6187
6188 free_irq(tp->pdev->irq, dev);
6189
6190 err = request_irq(tp->pdev->irq, tg3_test_isr,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006191 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07006192 if (err)
6193 return err;
6194
6195 tg3_enable_ints(tp);
6196
6197 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6198 HOSTCC_MODE_NOW);
6199
6200 for (i = 0; i < 5; i++) {
6201 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
6202 if (int_mbox != 0)
6203 break;
6204 msleep(10);
6205 }
6206
6207 tg3_disable_ints(tp);
6208
6209 free_irq(tp->pdev->irq, dev);
6210
6211 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6212 err = request_irq(tp->pdev->irq, tg3_msi,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006213 SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006214 else {
6215		irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6216 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6217 fn = tg3_interrupt_tagged;
6218 err = request_irq(tp->pdev->irq, fn,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006219 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006220 }
Michael Chan79381092005-04-21 17:13:59 -07006221
6222 if (err)
6223 return err;
6224
6225 if (int_mbox != 0)
6226 return 0;
6227
6228 return -EIO;
6229}
6230
6231/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6232 * successfully restored.
6233 */
6234static int tg3_test_msi(struct tg3 *tp)
6235{
6236 struct net_device *dev = tp->dev;
6237 int err;
6238 u16 pci_cmd;
6239
6240 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6241 return 0;
6242
6243 /* Turn off SERR reporting in case MSI terminates with Master
6244 * Abort.
6245 */
6246 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6247 pci_write_config_word(tp->pdev, PCI_COMMAND,
6248 pci_cmd & ~PCI_COMMAND_SERR);
6249
6250 err = tg3_test_interrupt(tp);
6251
6252 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6253
6254 if (!err)
6255 return 0;
6256
6257 /* other failures */
6258 if (err != -EIO)
6259 return err;
6260
6261 /* MSI test failed, go back to INTx mode */
6262 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6263 "switching to INTx mode. Please report this failure to "
6264 "the PCI maintainer and include system chipset information.\n",
6265 tp->dev->name);
6266
6267 free_irq(tp->pdev->irq, dev);
6268 pci_disable_msi(tp->pdev);
6269
6270 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6271
David S. Millerfac9b832005-05-18 22:46:34 -07006272 {
6273		irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6274 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6275 fn = tg3_interrupt_tagged;
Michael Chan79381092005-04-21 17:13:59 -07006276
David S. Millerfac9b832005-05-18 22:46:34 -07006277 err = request_irq(tp->pdev->irq, fn,
6278 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6279 }
Michael Chan79381092005-04-21 17:13:59 -07006280 if (err)
6281 return err;
6282
6283 /* Need to reset the chip because the MSI cycle may have terminated
6284 * with Master Abort.
6285 */
David S. Millerf47c11e2005-06-24 20:18:35 -07006286 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07006287
Michael Chan944d9802005-05-29 14:57:48 -07006288 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006289 err = tg3_init_hw(tp);
6290
David S. Millerf47c11e2005-06-24 20:18:35 -07006291 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006292
6293 if (err)
6294 free_irq(tp->pdev->irq, dev);
6295
6296 return err;
6297}
6298
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299static int tg3_open(struct net_device *dev)
6300{
6301 struct tg3 *tp = netdev_priv(dev);
6302 int err;
6303
David S. Millerf47c11e2005-06-24 20:18:35 -07006304 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006305
6306 tg3_disable_ints(tp);
6307 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6308
David S. Millerf47c11e2005-06-24 20:18:35 -07006309 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310
6311 /* The placement of this call is tied
6312 * to the setup and use of Host TX descriptors.
6313 */
6314 err = tg3_alloc_consistent(tp);
6315 if (err)
6316 return err;
6317
Michael Chan88b06bc22005-04-21 17:13:25 -07006318 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6319 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6320 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
David S. Millerfac9b832005-05-18 22:46:34 -07006321 /* All MSI supporting chips should support tagged
6322 * status. Assert that this is the case.
6323 */
6324 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6325 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6326 "Not using MSI.\n", tp->dev->name);
6327 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc22005-04-21 17:13:25 -07006328 u32 msi_mode;
6329
6330 msi_mode = tr32(MSGINT_MODE);
6331 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6332 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6333 }
6334 }
6335 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6336 err = request_irq(tp->pdev->irq, tg3_msi,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006337 SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006338 else {
6339		irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6340 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6341 fn = tg3_interrupt_tagged;
6342
6343 err = request_irq(tp->pdev->irq, fn,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006344 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006346
6347 if (err) {
Michael Chan88b06bc22005-04-21 17:13:25 -07006348 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6349 pci_disable_msi(tp->pdev);
6350 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6351 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 tg3_free_consistent(tp);
6353 return err;
6354 }
6355
David S. Millerf47c11e2005-06-24 20:18:35 -07006356 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006357
6358 err = tg3_init_hw(tp);
6359 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07006360 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 tg3_free_rings(tp);
6362 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07006363 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6364 tp->timer_offset = HZ;
6365 else
6366 tp->timer_offset = HZ / 10;
6367
6368 BUG_ON(tp->timer_offset > HZ);
6369 tp->timer_counter = tp->timer_multiplier =
6370 (HZ / tp->timer_offset);
6371 tp->asf_counter = tp->asf_multiplier =
6372 ((HZ / tp->timer_offset) * 120);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373
6374 init_timer(&tp->timer);
6375 tp->timer.expires = jiffies + tp->timer_offset;
6376 tp->timer.data = (unsigned long) tp;
6377 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006378 }
6379
David S. Millerf47c11e2005-06-24 20:18:35 -07006380 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381
6382 if (err) {
Michael Chan88b06bc22005-04-21 17:13:25 -07006383 free_irq(tp->pdev->irq, dev);
6384 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6385 pci_disable_msi(tp->pdev);
6386 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6387 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006388 tg3_free_consistent(tp);
6389 return err;
6390 }
6391
Michael Chan79381092005-04-21 17:13:59 -07006392 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6393 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07006394
Michael Chan79381092005-04-21 17:13:59 -07006395 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07006396 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07006397
6398 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6399 pci_disable_msi(tp->pdev);
6400 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6401 }
Michael Chan944d9802005-05-29 14:57:48 -07006402 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006403 tg3_free_rings(tp);
6404 tg3_free_consistent(tp);
6405
David S. Millerf47c11e2005-06-24 20:18:35 -07006406 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006407
6408 return err;
6409 }
6410 }
6411
David S. Millerf47c11e2005-06-24 20:18:35 -07006412 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006413
Michael Chan79381092005-04-21 17:13:59 -07006414 add_timer(&tp->timer);
6415 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416 tg3_enable_ints(tp);
6417
David S. Millerf47c11e2005-06-24 20:18:35 -07006418 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006419
6420 netif_start_queue(dev);
6421
6422 return 0;
6423}
6424
6425#if 0
6426/*static*/ void tg3_dump_state(struct tg3 *tp)
6427{
6428 u32 val32, val32_2, val32_3, val32_4, val32_5;
6429 u16 val16;
6430 int i;
6431
6432 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6433 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6434 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6435 val16, val32);
6436
6437 /* MAC block */
6438 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6439 tr32(MAC_MODE), tr32(MAC_STATUS));
6440 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6441 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6442 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6443 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6444 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6445 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6446
6447 /* Send data initiator control block */
6448 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6449 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6450 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6451 tr32(SNDDATAI_STATSCTRL));
6452
6453 /* Send data completion control block */
6454 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6455
6456 /* Send BD ring selector block */
6457 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6458 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6459
6460 /* Send BD initiator control block */
6461 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6462 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6463
6464 /* Send BD completion control block */
6465 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6466
6467 /* Receive list placement control block */
6468 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6469 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6470 printk(" RCVLPC_STATSCTRL[%08x]\n",
6471 tr32(RCVLPC_STATSCTRL));
6472
6473 /* Receive data and receive BD initiator control block */
6474 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6475 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6476
6477 /* Receive data completion control block */
6478 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6479 tr32(RCVDCC_MODE));
6480
6481 /* Receive BD initiator control block */
6482 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6483 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6484
6485 /* Receive BD completion control block */
6486 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6487 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6488
6489 /* Receive list selector control block */
6490 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6491 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6492
6493 /* Mbuf cluster free block */
6494 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6495 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6496
6497 /* Host coalescing control block */
6498 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6499 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6500 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6501 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6502 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6503 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6504 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6505 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6506 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6507 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6508 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6509 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6510
6511 /* Memory arbiter control block */
6512 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6513 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6514
6515 /* Buffer manager control block */
6516 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6517 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6518 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6519 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6520 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6521 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6522 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6523 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6524
6525 /* Read DMA control block */
6526 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6527 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6528
6529 /* Write DMA control block */
6530 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6531 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6532
6533 /* DMA completion block */
6534 printk("DEBUG: DMAC_MODE[%08x]\n",
6535 tr32(DMAC_MODE));
6536
6537 /* GRC block */
6538 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6539 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6540 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6541 tr32(GRC_LOCAL_CTRL));
6542
6543 /* TG3_BDINFOs */
6544 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6545 tr32(RCVDBDI_JUMBO_BD + 0x0),
6546 tr32(RCVDBDI_JUMBO_BD + 0x4),
6547 tr32(RCVDBDI_JUMBO_BD + 0x8),
6548 tr32(RCVDBDI_JUMBO_BD + 0xc));
6549 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6550 tr32(RCVDBDI_STD_BD + 0x0),
6551 tr32(RCVDBDI_STD_BD + 0x4),
6552 tr32(RCVDBDI_STD_BD + 0x8),
6553 tr32(RCVDBDI_STD_BD + 0xc));
6554 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6555 tr32(RCVDBDI_MINI_BD + 0x0),
6556 tr32(RCVDBDI_MINI_BD + 0x4),
6557 tr32(RCVDBDI_MINI_BD + 0x8),
6558 tr32(RCVDBDI_MINI_BD + 0xc));
6559
6560 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6561 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6562 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6563 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6564 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6565 val32, val32_2, val32_3, val32_4);
6566
6567 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6568 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6569 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6570 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6571 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6572 val32, val32_2, val32_3, val32_4);
6573
6574 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6575 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6576 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6577 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6578 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6579 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6580 val32, val32_2, val32_3, val32_4, val32_5);
6581
6582 /* SW status block */
6583 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6584 tp->hw_status->status,
6585 tp->hw_status->status_tag,
6586 tp->hw_status->rx_jumbo_consumer,
6587 tp->hw_status->rx_consumer,
6588 tp->hw_status->rx_mini_consumer,
6589 tp->hw_status->idx[0].rx_producer,
6590 tp->hw_status->idx[0].tx_consumer);
6591
6592 /* SW statistics block */
6593 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6594 ((u32 *)tp->hw_stats)[0],
6595 ((u32 *)tp->hw_stats)[1],
6596 ((u32 *)tp->hw_stats)[2],
6597 ((u32 *)tp->hw_stats)[3]);
6598
6599 /* Mailboxes */
6600 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6601 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6602 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6603 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6604 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6605
6606 /* NIC side send descriptors. */
6607 for (i = 0; i < 6; i++) {
6608 unsigned long txd;
6609
6610 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6611 + (i * sizeof(struct tg3_tx_buffer_desc));
6612 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6613 i,
6614 readl(txd + 0x0), readl(txd + 0x4),
6615 readl(txd + 0x8), readl(txd + 0xc));
6616 }
6617
6618 /* NIC side RX descriptors. */
6619 for (i = 0; i < 6; i++) {
6620 unsigned long rxd;
6621
6622 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6623 + (i * sizeof(struct tg3_rx_buffer_desc));
6624 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6625 i,
6626 readl(rxd + 0x0), readl(rxd + 0x4),
6627 readl(rxd + 0x8), readl(rxd + 0xc));
6628 rxd += (4 * sizeof(u32));
6629 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6630 i,
6631 readl(rxd + 0x0), readl(rxd + 0x4),
6632 readl(rxd + 0x8), readl(rxd + 0xc));
6633 }
6634
6635 for (i = 0; i < 6; i++) {
6636 unsigned long rxd;
6637
6638 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6639 + (i * sizeof(struct tg3_rx_buffer_desc));
6640 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6641 i,
6642 readl(rxd + 0x0), readl(rxd + 0x4),
6643 readl(rxd + 0x8), readl(rxd + 0xc));
6644 rxd += (4 * sizeof(u32));
6645 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6646 i,
6647 readl(rxd + 0x0), readl(rxd + 0x4),
6648 readl(rxd + 0x8), readl(rxd + 0xc));
6649 }
6650}
6651#endif
6652
6653static struct net_device_stats *tg3_get_stats(struct net_device *);
6654static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6655
6656static int tg3_close(struct net_device *dev)
6657{
6658 struct tg3 *tp = netdev_priv(dev);
6659
6660 netif_stop_queue(dev);
6661
6662 del_timer_sync(&tp->timer);
6663
David S. Millerf47c11e2005-06-24 20:18:35 -07006664 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006665#if 0
6666 tg3_dump_state(tp);
6667#endif
6668
6669 tg3_disable_ints(tp);
6670
Michael Chan944d9802005-05-29 14:57:48 -07006671 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 tg3_free_rings(tp);
6673 tp->tg3_flags &=
6674 ~(TG3_FLAG_INIT_COMPLETE |
6675 TG3_FLAG_GOT_SERDES_FLOWCTL);
6676 netif_carrier_off(tp->dev);
6677
David S. Millerf47c11e2005-06-24 20:18:35 -07006678 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679
Michael Chan88b06bc22005-04-21 17:13:25 -07006680 free_irq(tp->pdev->irq, dev);
6681 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6682 pci_disable_msi(tp->pdev);
6683 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6684 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685
6686 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6687 sizeof(tp->net_stats_prev));
6688 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6689 sizeof(tp->estats_prev));
6690
6691 tg3_free_consistent(tp);
6692
6693 return 0;
6694}
6695
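/*
 * Note on tg3_close() above: the net_stats_prev/estats_prev snapshots
 * are taken before tg3_free_consistent() releases the DMA block that
 * holds tp->hw_stats.  tg3_get_stats() and tg3_get_estats() below fall
 * back to those snapshots whenever hw_stats is unavailable, so the
 * driver keeps reporting the last known totals while the device is
 * closed.
 */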
6696static inline unsigned long get_stat64(tg3_stat64_t *val)
6697{
6698 unsigned long ret;
6699
6700#if (BITS_PER_LONG == 32)
6701 ret = val->low;
6702#else
6703 ret = ((u64)val->high << 32) | ((u64)val->low);
6704#endif
6705 return ret;
6706}
6707
6708static unsigned long calc_crc_errors(struct tg3 *tp)
6709{
6710 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6711
6712 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6713 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715 u32 val;
6716
David S. Millerf47c11e2005-06-24 20:18:35 -07006717 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718 if (!tg3_readphy(tp, 0x1e, &val)) {
6719 tg3_writephy(tp, 0x1e, val | 0x8000);
6720 tg3_readphy(tp, 0x14, &val);
6721 } else
6722 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07006723 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006724
6725 tp->phy_crc_errors += val;
6726
6727 return tp->phy_crc_errors;
6728 }
6729
6730 return get_stat64(&hw_stats->rx_fcs_errors);
6731}
6732
6733#define ESTAT_ADD(member) \
6734 estats->member = old_estats->member + \
6735 get_stat64(&hw_stats->member)
6736
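/*
 * ESTAT_ADD() adds the live NIC counter to the value snapshotted into
 * tp->estats_prev by tg3_close(), so the ethtool statistics remain
 * cumulative across a close/open cycle instead of restarting from the
 * freshly allocated hardware statistics block.
 */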
6737static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6738{
6739 struct tg3_ethtool_stats *estats = &tp->estats;
6740 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6741 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6742
6743 if (!hw_stats)
6744 return old_estats;
6745
6746 ESTAT_ADD(rx_octets);
6747 ESTAT_ADD(rx_fragments);
6748 ESTAT_ADD(rx_ucast_packets);
6749 ESTAT_ADD(rx_mcast_packets);
6750 ESTAT_ADD(rx_bcast_packets);
6751 ESTAT_ADD(rx_fcs_errors);
6752 ESTAT_ADD(rx_align_errors);
6753 ESTAT_ADD(rx_xon_pause_rcvd);
6754 ESTAT_ADD(rx_xoff_pause_rcvd);
6755 ESTAT_ADD(rx_mac_ctrl_rcvd);
6756 ESTAT_ADD(rx_xoff_entered);
6757 ESTAT_ADD(rx_frame_too_long_errors);
6758 ESTAT_ADD(rx_jabbers);
6759 ESTAT_ADD(rx_undersize_packets);
6760 ESTAT_ADD(rx_in_length_errors);
6761 ESTAT_ADD(rx_out_length_errors);
6762 ESTAT_ADD(rx_64_or_less_octet_packets);
6763 ESTAT_ADD(rx_65_to_127_octet_packets);
6764 ESTAT_ADD(rx_128_to_255_octet_packets);
6765 ESTAT_ADD(rx_256_to_511_octet_packets);
6766 ESTAT_ADD(rx_512_to_1023_octet_packets);
6767 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6768 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6769 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6770 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6771 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6772
6773 ESTAT_ADD(tx_octets);
6774 ESTAT_ADD(tx_collisions);
6775 ESTAT_ADD(tx_xon_sent);
6776 ESTAT_ADD(tx_xoff_sent);
6777 ESTAT_ADD(tx_flow_control);
6778 ESTAT_ADD(tx_mac_errors);
6779 ESTAT_ADD(tx_single_collisions);
6780 ESTAT_ADD(tx_mult_collisions);
6781 ESTAT_ADD(tx_deferred);
6782 ESTAT_ADD(tx_excessive_collisions);
6783 ESTAT_ADD(tx_late_collisions);
6784 ESTAT_ADD(tx_collide_2times);
6785 ESTAT_ADD(tx_collide_3times);
6786 ESTAT_ADD(tx_collide_4times);
6787 ESTAT_ADD(tx_collide_5times);
6788 ESTAT_ADD(tx_collide_6times);
6789 ESTAT_ADD(tx_collide_7times);
6790 ESTAT_ADD(tx_collide_8times);
6791 ESTAT_ADD(tx_collide_9times);
6792 ESTAT_ADD(tx_collide_10times);
6793 ESTAT_ADD(tx_collide_11times);
6794 ESTAT_ADD(tx_collide_12times);
6795 ESTAT_ADD(tx_collide_13times);
6796 ESTAT_ADD(tx_collide_14times);
6797 ESTAT_ADD(tx_collide_15times);
6798 ESTAT_ADD(tx_ucast_packets);
6799 ESTAT_ADD(tx_mcast_packets);
6800 ESTAT_ADD(tx_bcast_packets);
6801 ESTAT_ADD(tx_carrier_sense_errors);
6802 ESTAT_ADD(tx_discards);
6803 ESTAT_ADD(tx_errors);
6804
6805 ESTAT_ADD(dma_writeq_full);
6806 ESTAT_ADD(dma_write_prioq_full);
6807 ESTAT_ADD(rxbds_empty);
6808 ESTAT_ADD(rx_discards);
6809 ESTAT_ADD(rx_errors);
6810 ESTAT_ADD(rx_threshold_hit);
6811
6812 ESTAT_ADD(dma_readq_full);
6813 ESTAT_ADD(dma_read_prioq_full);
6814 ESTAT_ADD(tx_comp_queue_full);
6815
6816 ESTAT_ADD(ring_set_send_prod_index);
6817 ESTAT_ADD(ring_status_update);
6818 ESTAT_ADD(nic_irqs);
6819 ESTAT_ADD(nic_avoided_irqs);
6820 ESTAT_ADD(nic_tx_threshold_hit);
6821
6822 return estats;
6823}
6824
6825static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6826{
6827 struct tg3 *tp = netdev_priv(dev);
6828 struct net_device_stats *stats = &tp->net_stats;
6829 struct net_device_stats *old_stats = &tp->net_stats_prev;
6830 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6831
6832 if (!hw_stats)
6833 return old_stats;
6834
6835 stats->rx_packets = old_stats->rx_packets +
6836 get_stat64(&hw_stats->rx_ucast_packets) +
6837 get_stat64(&hw_stats->rx_mcast_packets) +
6838 get_stat64(&hw_stats->rx_bcast_packets);
6839
6840 stats->tx_packets = old_stats->tx_packets +
6841 get_stat64(&hw_stats->tx_ucast_packets) +
6842 get_stat64(&hw_stats->tx_mcast_packets) +
6843 get_stat64(&hw_stats->tx_bcast_packets);
6844
6845 stats->rx_bytes = old_stats->rx_bytes +
6846 get_stat64(&hw_stats->rx_octets);
6847 stats->tx_bytes = old_stats->tx_bytes +
6848 get_stat64(&hw_stats->tx_octets);
6849
6850 stats->rx_errors = old_stats->rx_errors +
6851 get_stat64(&hw_stats->rx_errors) +
6852 get_stat64(&hw_stats->rx_discards);
6853 stats->tx_errors = old_stats->tx_errors +
6854 get_stat64(&hw_stats->tx_errors) +
6855 get_stat64(&hw_stats->tx_mac_errors) +
6856 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6857 get_stat64(&hw_stats->tx_discards);
6858
6859 stats->multicast = old_stats->multicast +
6860 get_stat64(&hw_stats->rx_mcast_packets);
6861 stats->collisions = old_stats->collisions +
6862 get_stat64(&hw_stats->tx_collisions);
6863
6864 stats->rx_length_errors = old_stats->rx_length_errors +
6865 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6866 get_stat64(&hw_stats->rx_undersize_packets);
6867
6868 stats->rx_over_errors = old_stats->rx_over_errors +
6869 get_stat64(&hw_stats->rxbds_empty);
6870 stats->rx_frame_errors = old_stats->rx_frame_errors +
6871 get_stat64(&hw_stats->rx_align_errors);
6872 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6873 get_stat64(&hw_stats->tx_discards);
6874 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6875 get_stat64(&hw_stats->tx_carrier_sense_errors);
6876
6877 stats->rx_crc_errors = old_stats->rx_crc_errors +
6878 calc_crc_errors(tp);
6879
6880 return stats;
6881}
6882
6883static inline u32 calc_crc(unsigned char *buf, int len)
6884{
6885 u32 reg;
6886 u32 tmp;
6887 int j, k;
6888
6889 reg = 0xffffffff;
6890
6891 for (j = 0; j < len; j++) {
6892 reg ^= buf[j];
6893
6894 for (k = 0; k < 8; k++) {
6895 tmp = reg & 0x01;
6896
6897 reg >>= 1;
6898
6899 if (tmp) {
6900 reg ^= 0xedb88320;
6901 }
6902 }
6903 }
6904
6905 return ~reg;
6906}
6907
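/*
 * calc_crc() is a bitwise implementation of the standard Ethernet
 * CRC-32 (reflected polynomial 0xedb88320).  __tg3_set_rx_mode() below
 * runs each multicast address through it and uses the low 7 bits of
 * the inverted CRC as a bucket index: bits 6:5 select one of the four
 * MAC_HASH_REG_* registers and bits 4:0 select the bit within it,
 * giving a 128-entry imperfect hash filter.
 */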
6908static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6909{
6910 /* accept or reject all multicast frames */
6911 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6912 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6913 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6914 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6915}
6916
6917static void __tg3_set_rx_mode(struct net_device *dev)
6918{
6919 struct tg3 *tp = netdev_priv(dev);
6920 u32 rx_mode;
6921
6922 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6923 RX_MODE_KEEP_VLAN_TAG);
6924
6925 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6926 * flag clear.
6927 */
6928#if TG3_VLAN_TAG_USED
6929 if (!tp->vlgrp &&
6930 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6931 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6932#else
6933 /* By definition, VLAN is always disabled in this
6934 * case.
6935 */
6936 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6937 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6938#endif
6939
6940 if (dev->flags & IFF_PROMISC) {
6941 /* Promiscuous mode. */
6942 rx_mode |= RX_MODE_PROMISC;
6943 } else if (dev->flags & IFF_ALLMULTI) {
6944 /* Accept all multicast. */
6945 tg3_set_multi (tp, 1);
6946 } else if (dev->mc_count < 1) {
6947 /* Reject all multicast. */
6948 tg3_set_multi (tp, 0);
6949 } else {
6950 /* Accept one or more multicast(s). */
6951 struct dev_mc_list *mclist;
6952 unsigned int i;
6953 u32 mc_filter[4] = { 0, };
6954 u32 regidx;
6955 u32 bit;
6956 u32 crc;
6957
6958 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6959 i++, mclist = mclist->next) {
6960
6961 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6962 bit = ~crc & 0x7f;
6963 regidx = (bit & 0x60) >> 5;
6964 bit &= 0x1f;
6965 mc_filter[regidx] |= (1 << bit);
6966 }
6967
6968 tw32(MAC_HASH_REG_0, mc_filter[0]);
6969 tw32(MAC_HASH_REG_1, mc_filter[1]);
6970 tw32(MAC_HASH_REG_2, mc_filter[2]);
6971 tw32(MAC_HASH_REG_3, mc_filter[3]);
6972 }
6973
6974 if (rx_mode != tp->rx_mode) {
6975 tp->rx_mode = rx_mode;
6976 tw32_f(MAC_RX_MODE, rx_mode);
6977 udelay(10);
6978 }
6979}
6980
6981static void tg3_set_rx_mode(struct net_device *dev)
6982{
6983 struct tg3 *tp = netdev_priv(dev);
6984
David S. Millerf47c11e2005-06-24 20:18:35 -07006985 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07006987 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006988}
6989
6990#define TG3_REGDUMP_LEN (32 * 1024)
6991
6992static int tg3_get_regs_len(struct net_device *dev)
6993{
6994 return TG3_REGDUMP_LEN;
6995}
6996
6997static void tg3_get_regs(struct net_device *dev,
6998 struct ethtool_regs *regs, void *_p)
6999{
7000 u32 *p = _p;
7001 struct tg3 *tp = netdev_priv(dev);
7002 u8 *orig_p = _p;
7003 int i;
7004
7005 regs->version = 0;
7006
7007 memset(p, 0, TG3_REGDUMP_LEN);
7008
David S. Millerf47c11e2005-06-24 20:18:35 -07007009 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007010
7011#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7012#define GET_REG32_LOOP(base,len) \
7013do { p = (u32 *)(orig_p + (base)); \
7014 for (i = 0; i < len; i += 4) \
7015 __GET_REG32((base) + i); \
7016} while (0)
7017#define GET_REG32_1(reg) \
7018do { p = (u32 *)(orig_p + (reg)); \
7019 __GET_REG32((reg)); \
7020} while (0)
7021
7022 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7023 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7024 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7025 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7026 GET_REG32_1(SNDDATAC_MODE);
7027 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7028 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7029 GET_REG32_1(SNDBDC_MODE);
7030 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7031 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7032 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7033 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7034 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7035 GET_REG32_1(RCVDCC_MODE);
7036 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7037 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7038 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7039 GET_REG32_1(MBFREE_MODE);
7040 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7041 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7042 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7043 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7044 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7045 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7046 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7047 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7048 GET_REG32_LOOP(FTQ_RESET, 0x120);
7049 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7050 GET_REG32_1(DMAC_MODE);
7051 GET_REG32_LOOP(GRC_MODE, 0x4c);
7052 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7053 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7054
7055#undef __GET_REG32
7056#undef GET_REG32_LOOP
7057#undef GET_REG32_1
7058
David S. Millerf47c11e2005-06-24 20:18:35 -07007059 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007060}
7061
7062static int tg3_get_eeprom_len(struct net_device *dev)
7063{
7064 struct tg3 *tp = netdev_priv(dev);
7065
7066 return tp->nvram_size;
7067}
7068
7069static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7070
7071static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7072{
7073 struct tg3 *tp = netdev_priv(dev);
7074 int ret;
7075 u8 *pd;
7076 u32 i, offset, len, val, b_offset, b_count;
7077
7078 offset = eeprom->offset;
7079 len = eeprom->len;
7080 eeprom->len = 0;
7081
7082 eeprom->magic = TG3_EEPROM_MAGIC;
7083
7084 if (offset & 3) {
7085 /* adjustments to start on required 4 byte boundary */
7086 b_offset = offset & 3;
7087 b_count = 4 - b_offset;
7088 if (b_count > len) {
7089 /* i.e. offset=1 len=2 */
7090 b_count = len;
7091 }
7092 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7093 if (ret)
7094 return ret;
7095 val = cpu_to_le32(val);
7096 memcpy(data, ((char*)&val) + b_offset, b_count);
7097 len -= b_count;
7098 offset += b_count;
7099 eeprom->len += b_count;
7100 }
7101
7102 /* read bytes up to the last 4 byte boundary */
7103 pd = &data[eeprom->len];
7104 for (i = 0; i < (len - (len & 3)); i += 4) {
7105 ret = tg3_nvram_read(tp, offset + i, &val);
7106 if (ret) {
7107 eeprom->len += i;
7108 return ret;
7109 }
7110 val = cpu_to_le32(val);
7111 memcpy(pd + i, &val, 4);
7112 }
7113 eeprom->len += i;
7114
7115 if (len & 3) {
7116 /* read last bytes not ending on 4 byte boundary */
7117 pd = &data[eeprom->len];
7118 b_count = len & 3;
7119 b_offset = offset + len - b_count;
7120 ret = tg3_nvram_read(tp, b_offset, &val);
7121 if (ret)
7122 return ret;
7123 val = cpu_to_le32(val);
7124 memcpy(pd, ((char*)&val), b_count);
7125 eeprom->len += b_count;
7126 }
7127 return 0;
7128}
7129
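/*
 * tg3_nvram_read() returns one aligned 32-bit word at a time, so
 * tg3_get_eeprom() above splits a request into up to three parts: a
 * leading partial word when the offset is unaligned, a run of whole
 * words, and a trailing partial word when the length is not a
 * multiple of four.  eeprom->len is advanced after each part so a
 * partial result is reported correctly if a read fails.
 */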
7130static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7131
7132static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7133{
7134 struct tg3 *tp = netdev_priv(dev);
7135 int ret;
7136 u32 offset, len, b_offset, odd_len, start, end;
7137 u8 *buf;
7138
7139 if (eeprom->magic != TG3_EEPROM_MAGIC)
7140 return -EINVAL;
7141
7142 offset = eeprom->offset;
7143 len = eeprom->len;
7144
7145 if ((b_offset = (offset & 3))) {
7146 /* adjustments to start on required 4 byte boundary */
7147 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7148 if (ret)
7149 return ret;
7150 start = cpu_to_le32(start);
7151 len += b_offset;
7152 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07007153 if (len < 4)
7154 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155 }
7156
7157 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07007158 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159 /* adjustments to end on required 4 byte boundary */
7160 odd_len = 1;
7161 len = (len + 3) & ~3;
7162 ret = tg3_nvram_read(tp, offset+len-4, &end);
7163 if (ret)
7164 return ret;
7165 end = cpu_to_le32(end);
7166 }
7167
7168 buf = data;
7169 if (b_offset || odd_len) {
7170 buf = kmalloc(len, GFP_KERNEL);
7171 if (!buf)
7172 return -ENOMEM;
7173 if (b_offset)
7174 memcpy(buf, &start, 4);
7175 if (odd_len)
7176 memcpy(buf+len-4, &end, 4);
7177 memcpy(buf + b_offset, data, eeprom->len);
7178 }
7179
7180 ret = tg3_nvram_write_block(tp, offset, len, buf);
7181
7182 if (buf != data)
7183 kfree(buf);
7184
7185 return ret;
7186}
7187
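/*
 * The write path mirrors the read path above: when the requested
 * range does not start or end on a 4 byte boundary, tg3_set_eeprom()
 * first reads the neighbouring words and assembles a temporary
 * buffer, so tg3_nvram_write_block() is always handed an aligned,
 * whole-word region.
 */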
7188static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7189{
7190 struct tg3 *tp = netdev_priv(dev);
7191
7192 cmd->supported = (SUPPORTED_Autoneg);
7193
7194 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7195 cmd->supported |= (SUPPORTED_1000baseT_Half |
7196 SUPPORTED_1000baseT_Full);
7197
7198 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7199 cmd->supported |= (SUPPORTED_100baseT_Half |
7200 SUPPORTED_100baseT_Full |
7201 SUPPORTED_10baseT_Half |
7202 SUPPORTED_10baseT_Full |
7203 SUPPORTED_MII);
7204 else
7205 cmd->supported |= SUPPORTED_FIBRE;
7206
7207 cmd->advertising = tp->link_config.advertising;
7208 if (netif_running(dev)) {
7209 cmd->speed = tp->link_config.active_speed;
7210 cmd->duplex = tp->link_config.active_duplex;
7211 }
7212 cmd->port = 0;
7213 cmd->phy_address = PHY_ADDR;
7214 cmd->transceiver = 0;
7215 cmd->autoneg = tp->link_config.autoneg;
7216 cmd->maxtxpkt = 0;
7217 cmd->maxrxpkt = 0;
7218 return 0;
7219}
7220
7221static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7222{
7223 struct tg3 *tp = netdev_priv(dev);
7224
7225 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7226 /* These are the only valid advertisement bits allowed. */
7227 if (cmd->autoneg == AUTONEG_ENABLE &&
7228 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7229 ADVERTISED_1000baseT_Full |
7230 ADVERTISED_Autoneg |
7231 ADVERTISED_FIBRE)))
7232 return -EINVAL;
7233 }
7234
David S. Millerf47c11e2005-06-24 20:18:35 -07007235 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007236
7237 tp->link_config.autoneg = cmd->autoneg;
7238 if (cmd->autoneg == AUTONEG_ENABLE) {
7239 tp->link_config.advertising = cmd->advertising;
7240 tp->link_config.speed = SPEED_INVALID;
7241 tp->link_config.duplex = DUPLEX_INVALID;
7242 } else {
7243 tp->link_config.advertising = 0;
7244 tp->link_config.speed = cmd->speed;
7245 tp->link_config.duplex = cmd->duplex;
7246 }
7247
7248 if (netif_running(dev))
7249 tg3_setup_phy(tp, 1);
7250
David S. Millerf47c11e2005-06-24 20:18:35 -07007251 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007252
7253 return 0;
7254}
7255
7256static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7257{
7258 struct tg3 *tp = netdev_priv(dev);
7259
7260 strcpy(info->driver, DRV_MODULE_NAME);
7261 strcpy(info->version, DRV_MODULE_VERSION);
7262 strcpy(info->bus_info, pci_name(tp->pdev));
7263}
7264
7265static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7266{
7267 struct tg3 *tp = netdev_priv(dev);
7268
7269 wol->supported = WAKE_MAGIC;
7270 wol->wolopts = 0;
7271 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7272 wol->wolopts = WAKE_MAGIC;
7273 memset(&wol->sopass, 0, sizeof(wol->sopass));
7274}
7275
7276static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7277{
7278 struct tg3 *tp = netdev_priv(dev);
7279
7280 if (wol->wolopts & ~WAKE_MAGIC)
7281 return -EINVAL;
7282 if ((wol->wolopts & WAKE_MAGIC) &&
7283 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7284 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7285 return -EINVAL;
7286
David S. Millerf47c11e2005-06-24 20:18:35 -07007287 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007288 if (wol->wolopts & WAKE_MAGIC)
7289 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7290 else
7291 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07007292 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293
7294 return 0;
7295}
7296
7297static u32 tg3_get_msglevel(struct net_device *dev)
7298{
7299 struct tg3 *tp = netdev_priv(dev);
7300 return tp->msg_enable;
7301}
7302
7303static void tg3_set_msglevel(struct net_device *dev, u32 value)
7304{
7305 struct tg3 *tp = netdev_priv(dev);
7306 tp->msg_enable = value;
7307}
7308
7309#if TG3_TSO_SUPPORT != 0
7310static int tg3_set_tso(struct net_device *dev, u32 value)
7311{
7312 struct tg3 *tp = netdev_priv(dev);
7313
7314 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7315 if (value)
7316 return -EINVAL;
7317 return 0;
7318 }
7319 return ethtool_op_set_tso(dev, value);
7320}
7321#endif
7322
7323static int tg3_nway_reset(struct net_device *dev)
7324{
7325 struct tg3 *tp = netdev_priv(dev);
7326 u32 bmcr;
7327 int r;
7328
7329 if (!netif_running(dev))
7330 return -EAGAIN;
7331
David S. Millerf47c11e2005-06-24 20:18:35 -07007332 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007333 r = -EINVAL;
7334 tg3_readphy(tp, MII_BMCR, &bmcr);
7335 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7336 (bmcr & BMCR_ANENABLE)) {
7337 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7338 r = 0;
7339 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007340 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341
7342 return r;
7343}
7344
7345static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7346{
7347 struct tg3 *tp = netdev_priv(dev);
7348
7349 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7350 ering->rx_mini_max_pending = 0;
7351 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7352
7353 ering->rx_pending = tp->rx_pending;
7354 ering->rx_mini_pending = 0;
7355 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7356 ering->tx_pending = tp->tx_pending;
7357}
7358
7359static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7360{
7361 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007362 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007363
7364 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7365 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7366 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7367 return -EINVAL;
7368
Michael Chanbbe832c2005-06-24 20:20:04 -07007369 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007371 irq_sync = 1;
7372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373
Michael Chanbbe832c2005-06-24 20:20:04 -07007374 tg3_full_lock(tp, irq_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007375
7376 tp->rx_pending = ering->rx_pending;
7377
7378 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7379 tp->rx_pending > 63)
7380 tp->rx_pending = 63;
7381 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7382 tp->tx_pending = ering->tx_pending;
7383
7384 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007385 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386 tg3_init_hw(tp);
7387 tg3_netif_start(tp);
7388 }
7389
David S. Millerf47c11e2005-06-24 20:18:35 -07007390 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007391
7392 return 0;
7393}
7394
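/*
 * Applying new ring sizes (and, below, new pause parameters) while
 * the interface is running follows one recipe: tg3_netif_stop()
 * quiesces the data path, tg3_full_lock() is taken with irq_sync set
 * so that interrupt handling is synchronized as well, the chip is
 * halted and re-initialized with the new settings, and
 * tg3_netif_start() resumes traffic.
 */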
7395static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7396{
7397 struct tg3 *tp = netdev_priv(dev);
7398
7399 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7400 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7401 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7402}
7403
7404static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7405{
7406 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007407 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007408
Michael Chanbbe832c2005-06-24 20:20:04 -07007409 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007410 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007411 irq_sync = 1;
7412 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007413
Michael Chanbbe832c2005-06-24 20:20:04 -07007414 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07007415
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416 if (epause->autoneg)
7417 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7418 else
7419 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7420 if (epause->rx_pause)
7421 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7422 else
7423 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7424 if (epause->tx_pause)
7425 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7426 else
7427 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7428
7429 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007431 tg3_init_hw(tp);
7432 tg3_netif_start(tp);
7433 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007434
7435 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436
7437 return 0;
7438}
7439
7440static u32 tg3_get_rx_csum(struct net_device *dev)
7441{
7442 struct tg3 *tp = netdev_priv(dev);
7443 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7444}
7445
7446static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7447{
7448 struct tg3 *tp = netdev_priv(dev);
7449
7450 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7451 if (data != 0)
7452 return -EINVAL;
7453 return 0;
7454 }
7455
David S. Millerf47c11e2005-06-24 20:18:35 -07007456 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457 if (data)
7458 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7459 else
7460 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07007461 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
7463 return 0;
7464}
7465
7466static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7467{
7468 struct tg3 *tp = netdev_priv(dev);
7469
7470 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7471 if (data != 0)
7472 return -EINVAL;
7473 return 0;
7474 }
7475
7476 if (data)
7477 dev->features |= NETIF_F_IP_CSUM;
7478 else
7479 dev->features &= ~NETIF_F_IP_CSUM;
7480
7481 return 0;
7482}
7483
7484static int tg3_get_stats_count (struct net_device *dev)
7485{
7486 return TG3_NUM_STATS;
7487}
7488
Michael Chan4cafd3f2005-05-29 14:56:34 -07007489static int tg3_get_test_count (struct net_device *dev)
7490{
7491 return TG3_NUM_TEST;
7492}
7493
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7495{
7496 switch (stringset) {
7497 case ETH_SS_STATS:
7498 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7499 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07007500 case ETH_SS_TEST:
7501 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7502 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503 default:
7504 WARN_ON(1); /* we need a WARN() */
7505 break;
7506 }
7507}
7508
7509static void tg3_get_ethtool_stats (struct net_device *dev,
7510 struct ethtool_stats *estats, u64 *tmp_stats)
7511{
7512 struct tg3 *tp = netdev_priv(dev);
7513 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7514}
7515
Michael Chan566f86a2005-05-29 14:56:58 -07007516#define NVRAM_TEST_SIZE 0x100
7517
7518static int tg3_test_nvram(struct tg3 *tp)
7519{
7520 u32 *buf, csum;
7521 int i, j, err = 0;
7522
7523 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7524 if (buf == NULL)
7525 return -ENOMEM;
7526
7527 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7528 u32 val;
7529
7530 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7531 break;
7532 buf[j] = cpu_to_le32(val);
7533 }
7534 if (i < NVRAM_TEST_SIZE)
7535 goto out;
7536
7537 err = -EIO;
7538 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7539 goto out;
7540
7541 /* Bootstrap checksum at offset 0x10 */
7542 csum = calc_crc((unsigned char *) buf, 0x10);
7543 if (csum != cpu_to_le32(buf[0x10/4]))
7544 goto out;
7545
7546 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7547 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7548 if (csum != cpu_to_le32(buf[0xfc/4]))
7549 goto out;
7550
7551 err = 0;
7552
7553out:
7554 kfree(buf);
7555 return err;
7556}
7557
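/*
 * tg3_test_nvram() above validates the first 256 bytes of NVRAM: the
 * TG3_EEPROM_MAGIC signature in word 0, a checksum over bytes
 * 0x00-0x0f stored at offset 0x10, and a checksum over the 0x88-byte
 * manufacturing block starting at 0x74, stored at offset 0xfc.  Both
 * checksums are computed with the same calc_crc() routine used for
 * the multicast hash filter.
 */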
Michael Chanca430072005-05-29 14:57:23 -07007558#define TG3_SERDES_TIMEOUT_SEC 2
7559#define TG3_COPPER_TIMEOUT_SEC 6
7560
7561static int tg3_test_link(struct tg3 *tp)
7562{
7563 int i, max;
7564
7565 if (!netif_running(tp->dev))
7566 return -ENODEV;
7567
7568 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7569 max = TG3_SERDES_TIMEOUT_SEC;
7570 else
7571 max = TG3_COPPER_TIMEOUT_SEC;
7572
7573 for (i = 0; i < max; i++) {
7574 if (netif_carrier_ok(tp->dev))
7575 return 0;
7576
7577 if (msleep_interruptible(1000))
7578 break;
7579 }
7580
7581 return -EIO;
7582}
7583
Michael Chana71116d2005-05-29 14:58:11 -07007584/* Only test the commonly used registers */
7585static int tg3_test_registers(struct tg3 *tp)
7586{
7587 int i, is_5705;
7588 u32 offset, read_mask, write_mask, val, save_val, read_val;
7589 static struct {
7590 u16 offset;
7591 u16 flags;
7592#define TG3_FL_5705 0x1
7593#define TG3_FL_NOT_5705 0x2
7594#define TG3_FL_NOT_5788 0x4
7595 u32 read_mask;
7596 u32 write_mask;
7597 } reg_tbl[] = {
7598 /* MAC Control Registers */
7599 { MAC_MODE, TG3_FL_NOT_5705,
7600 0x00000000, 0x00ef6f8c },
7601 { MAC_MODE, TG3_FL_5705,
7602 0x00000000, 0x01ef6b8c },
7603 { MAC_STATUS, TG3_FL_NOT_5705,
7604 0x03800107, 0x00000000 },
7605 { MAC_STATUS, TG3_FL_5705,
7606 0x03800100, 0x00000000 },
7607 { MAC_ADDR_0_HIGH, 0x0000,
7608 0x00000000, 0x0000ffff },
7609 { MAC_ADDR_0_LOW, 0x0000,
7610 0x00000000, 0xffffffff },
7611 { MAC_RX_MTU_SIZE, 0x0000,
7612 0x00000000, 0x0000ffff },
7613 { MAC_TX_MODE, 0x0000,
7614 0x00000000, 0x00000070 },
7615 { MAC_TX_LENGTHS, 0x0000,
7616 0x00000000, 0x00003fff },
7617 { MAC_RX_MODE, TG3_FL_NOT_5705,
7618 0x00000000, 0x000007fc },
7619 { MAC_RX_MODE, TG3_FL_5705,
7620 0x00000000, 0x000007dc },
7621 { MAC_HASH_REG_0, 0x0000,
7622 0x00000000, 0xffffffff },
7623 { MAC_HASH_REG_1, 0x0000,
7624 0x00000000, 0xffffffff },
7625 { MAC_HASH_REG_2, 0x0000,
7626 0x00000000, 0xffffffff },
7627 { MAC_HASH_REG_3, 0x0000,
7628 0x00000000, 0xffffffff },
7629
7630 /* Receive Data and Receive BD Initiator Control Registers. */
7631 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7632 0x00000000, 0xffffffff },
7633 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7634 0x00000000, 0xffffffff },
7635 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7636 0x00000000, 0x00000003 },
7637 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7638 0x00000000, 0xffffffff },
7639 { RCVDBDI_STD_BD+0, 0x0000,
7640 0x00000000, 0xffffffff },
7641 { RCVDBDI_STD_BD+4, 0x0000,
7642 0x00000000, 0xffffffff },
7643 { RCVDBDI_STD_BD+8, 0x0000,
7644 0x00000000, 0xffff0002 },
7645 { RCVDBDI_STD_BD+0xc, 0x0000,
7646 0x00000000, 0xffffffff },
7647
7648 /* Receive BD Initiator Control Registers. */
7649 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7650 0x00000000, 0xffffffff },
7651 { RCVBDI_STD_THRESH, TG3_FL_5705,
7652 0x00000000, 0x000003ff },
7653 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7654 0x00000000, 0xffffffff },
7655
7656 /* Host Coalescing Control Registers. */
7657 { HOSTCC_MODE, TG3_FL_NOT_5705,
7658 0x00000000, 0x00000004 },
7659 { HOSTCC_MODE, TG3_FL_5705,
7660 0x00000000, 0x000000f6 },
7661 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7662 0x00000000, 0xffffffff },
7663 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7664 0x00000000, 0x000003ff },
7665 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7666 0x00000000, 0xffffffff },
7667 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7668 0x00000000, 0x000003ff },
7669 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7670 0x00000000, 0xffffffff },
7671 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7672 0x00000000, 0x000000ff },
7673 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7674 0x00000000, 0xffffffff },
7675 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7676 0x00000000, 0x000000ff },
7677 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7678 0x00000000, 0xffffffff },
7679 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7680 0x00000000, 0xffffffff },
7681 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7682 0x00000000, 0xffffffff },
7683 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7684 0x00000000, 0x000000ff },
7685 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7686 0x00000000, 0xffffffff },
7687 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7688 0x00000000, 0x000000ff },
7689 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7690 0x00000000, 0xffffffff },
7691 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7692 0x00000000, 0xffffffff },
7693 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7694 0x00000000, 0xffffffff },
7695 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7696 0x00000000, 0xffffffff },
7697 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7698 0x00000000, 0xffffffff },
7699 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7700 0xffffffff, 0x00000000 },
7701 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7702 0xffffffff, 0x00000000 },
7703
7704 /* Buffer Manager Control Registers. */
7705 { BUFMGR_MB_POOL_ADDR, 0x0000,
7706 0x00000000, 0x007fff80 },
7707 { BUFMGR_MB_POOL_SIZE, 0x0000,
7708 0x00000000, 0x007fffff },
7709 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7710 0x00000000, 0x0000003f },
7711 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7712 0x00000000, 0x000001ff },
7713 { BUFMGR_MB_HIGH_WATER, 0x0000,
7714 0x00000000, 0x000001ff },
7715 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7716 0xffffffff, 0x00000000 },
7717 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7718 0xffffffff, 0x00000000 },
7719
7720 /* Mailbox Registers */
7721 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7722 0x00000000, 0x000001ff },
7723 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7724 0x00000000, 0x000001ff },
7725 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7726 0x00000000, 0x000007ff },
7727 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7728 0x00000000, 0x000001ff },
7729
7730 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7731 };
7732
7733 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7734 is_5705 = 1;
7735 else
7736 is_5705 = 0;
7737
7738 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7739 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7740 continue;
7741
7742 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7743 continue;
7744
7745 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7746 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7747 continue;
7748
7749 offset = (u32) reg_tbl[i].offset;
7750 read_mask = reg_tbl[i].read_mask;
7751 write_mask = reg_tbl[i].write_mask;
7752
7753 /* Save the original register content */
7754 save_val = tr32(offset);
7755
7756 /* Determine the read-only value. */
7757 read_val = save_val & read_mask;
7758
7759 /* Write zero to the register, then make sure the read-only bits
7760 * are not changed and the read/write bits are all zeros.
7761 */
7762 tw32(offset, 0);
7763
7764 val = tr32(offset);
7765
7766 /* Test the read-only and read/write bits. */
7767 if (((val & read_mask) != read_val) || (val & write_mask))
7768 goto out;
7769
7770 /* Write ones to all the bits defined by RdMask and WrMask, then
7771 * make sure the read-only bits are not changed and the
7772 * read/write bits are all ones.
7773 */
7774 tw32(offset, read_mask | write_mask);
7775
7776 val = tr32(offset);
7777
7778 /* Test the read-only bits. */
7779 if ((val & read_mask) != read_val)
7780 goto out;
7781
7782 /* Test the read/write bits. */
7783 if ((val & write_mask) != write_mask)
7784 goto out;
7785
7786 tw32(offset, save_val);
7787 }
7788
7789 return 0;
7790
7791out:
7792 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7793 tw32(offset, save_val);
7794 return -EIO;
7795}
7796
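/*
 * Each reg_tbl[] entry in tg3_test_registers() above pairs a
 * read_mask (bits expected to be read-only) with a write_mask (bits
 * expected to be read/write); the TG3_FL_* flags limit an entry to
 * 5705-class, non-5705 or non-5788 chips.  The test writes all-zeros
 * and then all-ones through the masks, checks that only the writable
 * bits change, and restores the saved value before moving on.
 */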
Michael Chan7942e1d2005-05-29 14:58:36 -07007797static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7798{
7799 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7800 int i;
7801 u32 j;
7802
7803 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7804 for (j = 0; j < len; j += 4) {
7805 u32 val;
7806
7807 tg3_write_mem(tp, offset + j, test_pattern[i]);
7808 tg3_read_mem(tp, offset + j, &val);
7809 if (val != test_pattern[i])
7810 return -EIO;
7811 }
7812 }
7813 return 0;
7814}
7815
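/*
 * tg3_do_mem_test() walks the given region once per pattern
 * (all-zeros, all-ones, and the alternating value 0xaa55a55a),
 * writing and reading back each word through tg3_write_mem() and
 * tg3_read_mem().  tg3_test_memory() below applies it to the SRAM
 * ranges in mem_tbl_5705 or mem_tbl_570x, depending on the chip.
 */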
7816static int tg3_test_memory(struct tg3 *tp)
7817{
7818 static struct mem_entry {
7819 u32 offset;
7820 u32 len;
7821 } mem_tbl_570x[] = {
7822 { 0x00000000, 0x01000},
7823 { 0x00002000, 0x1c000},
7824 { 0xffffffff, 0x00000}
7825 }, mem_tbl_5705[] = {
7826 { 0x00000100, 0x0000c},
7827 { 0x00000200, 0x00008},
7828 { 0x00000b50, 0x00400},
7829 { 0x00004000, 0x00800},
7830 { 0x00006000, 0x01000},
7831 { 0x00008000, 0x02000},
7832 { 0x00010000, 0x0e000},
7833 { 0xffffffff, 0x00000}
7834 };
7835 struct mem_entry *mem_tbl;
7836 int err = 0;
7837 int i;
7838
7839 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7840 mem_tbl = mem_tbl_5705;
7841 else
7842 mem_tbl = mem_tbl_570x;
7843
7844 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7845 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7846 mem_tbl[i].len)) != 0)
7847 break;
7848 }
7849
7850 return err;
7851}
7852
Michael Chanc76949a2005-05-29 14:58:59 -07007853static int tg3_test_loopback(struct tg3 *tp)
7854{
7855 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7856 u32 desc_idx;
7857 struct sk_buff *skb, *rx_skb;
7858 u8 *tx_data;
7859 dma_addr_t map;
7860 int num_pkts, tx_len, rx_len, i, err;
7861 struct tg3_rx_buffer_desc *desc;
7862
7863 if (!netif_running(tp->dev))
7864 return -ENODEV;
7865
7866 err = -EIO;
7867
7868 tg3_abort_hw(tp, 1);
7869
Michael Chanc76949a2005-05-29 14:58:59 -07007870 tg3_reset_hw(tp);
7871
7872 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7873 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7874 MAC_MODE_PORT_MODE_GMII;
7875 tw32(MAC_MODE, mac_mode);
7876
7877 tx_len = 1514;
7878 skb = dev_alloc_skb(tx_len);
	if (!skb)
		return -ENOMEM;
7879 tx_data = skb_put(skb, tx_len);
7880 memcpy(tx_data, tp->dev->dev_addr, 6);
7881 memset(tx_data + 6, 0x0, 8);
7882
7883 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7884
7885 for (i = 14; i < tx_len; i++)
7886 tx_data[i] = (u8) (i & 0xff);
7887
7888 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7889
7890 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7891 HOSTCC_MODE_NOW);
7892
7893 udelay(10);
7894
7895 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7896
7897 send_idx = 0;
7898 num_pkts = 0;
7899
7900 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7901
7902 send_idx++;
7903 num_pkts++;
7904
7905 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7906 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7907
7908 udelay(10);
7909
7910 for (i = 0; i < 10; i++) {
7911 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7912 HOSTCC_MODE_NOW);
7913
7914 udelay(10);
7915
7916 tx_idx = tp->hw_status->idx[0].tx_consumer;
7917 rx_idx = tp->hw_status->idx[0].rx_producer;
7918 if ((tx_idx == send_idx) &&
7919 (rx_idx == (rx_start_idx + num_pkts)))
7920 break;
7921 }
7922
7923 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7924 dev_kfree_skb(skb);
7925
7926 if (tx_idx != send_idx)
7927 goto out;
7928
7929 if (rx_idx != rx_start_idx + num_pkts)
7930 goto out;
7931
7932 desc = &tp->rx_rcb[rx_start_idx];
7933 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7934 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7935 if (opaque_key != RXD_OPAQUE_RING_STD)
7936 goto out;
7937
7938 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7939 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7940 goto out;
7941
7942 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7943 if (rx_len != tx_len)
7944 goto out;
7945
7946 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7947
7948 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7949 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7950
7951 for (i = 14; i < tx_len; i++) {
7952 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7953 goto out;
7954 }
7955 err = 0;
7956
7957 /* tg3_free_rings will unmap and free the rx_skb */
7958out:
7959 return err;
7960}
7961
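/*
 * The loopback test above places the MAC in internal GMII loopback
 * (MAC_MODE_PORT_INT_LPBACK), queues a single 1514-byte frame filled
 * with an incrementing byte pattern, kicks host coalescing with
 * HOSTCC_MODE_NOW, and polls the status block until the TX consumer
 * and RX producer indices show the frame has looped back.  The
 * received buffer is then compared byte-for-byte against what was
 * sent.
 */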
Michael Chan4cafd3f2005-05-29 14:56:34 -07007962static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7963 u64 *data)
7964{
Michael Chan566f86a2005-05-29 14:56:58 -07007965 struct tg3 *tp = netdev_priv(dev);
7966
7967 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7968
7969 if (tg3_test_nvram(tp) != 0) {
7970 etest->flags |= ETH_TEST_FL_FAILED;
7971 data[0] = 1;
7972 }
Michael Chanca430072005-05-29 14:57:23 -07007973 if (tg3_test_link(tp) != 0) {
7974 etest->flags |= ETH_TEST_FL_FAILED;
7975 data[1] = 1;
7976 }
Michael Chana71116d2005-05-29 14:58:11 -07007977 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanbbe832c2005-06-24 20:20:04 -07007978 int irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07007979
Michael Chanbbe832c2005-06-24 20:20:04 -07007980 if (netif_running(dev)) {
7981 tg3_netif_stop(tp);
7982 irq_sync = 1;
7983 }
7984
7985 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07007986
7987 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7988 tg3_nvram_lock(tp);
7989 tg3_halt_cpu(tp, RX_CPU_BASE);
7990 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7991 tg3_halt_cpu(tp, TX_CPU_BASE);
7992 tg3_nvram_unlock(tp);
7993
7994 if (tg3_test_registers(tp) != 0) {
7995 etest->flags |= ETH_TEST_FL_FAILED;
7996 data[2] = 1;
7997 }
Michael Chan7942e1d2005-05-29 14:58:36 -07007998 if (tg3_test_memory(tp) != 0) {
7999 etest->flags |= ETH_TEST_FL_FAILED;
8000 data[3] = 1;
8001 }
Michael Chanc76949a2005-05-29 14:58:59 -07008002 if (tg3_test_loopback(tp) != 0) {
8003 etest->flags |= ETH_TEST_FL_FAILED;
8004 data[4] = 1;
8005 }
Michael Chana71116d2005-05-29 14:58:11 -07008006
David S. Millerf47c11e2005-06-24 20:18:35 -07008007 tg3_full_unlock(tp);
8008
Michael Chand4bc3922005-05-29 14:59:20 -07008009 if (tg3_test_interrupt(tp) != 0) {
8010 etest->flags |= ETH_TEST_FL_FAILED;
8011 data[5] = 1;
8012 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008013
8014 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07008015
Michael Chana71116d2005-05-29 14:58:11 -07008016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8017 if (netif_running(dev)) {
8018 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8019 tg3_init_hw(tp);
8020 tg3_netif_start(tp);
8021 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008022
8023 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008024 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07008025}
8026
Linus Torvalds1da177e2005-04-16 15:20:36 -07008027static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8028{
8029 struct mii_ioctl_data *data = if_mii(ifr);
8030 struct tg3 *tp = netdev_priv(dev);
8031 int err;
8032
8033 switch(cmd) {
8034 case SIOCGMIIPHY:
8035 data->phy_id = PHY_ADDR;
8036
8037 /* fallthru */
8038 case SIOCGMIIREG: {
8039 u32 mii_regval;
8040
8041 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8042 break; /* We have no PHY */
8043
David S. Millerf47c11e2005-06-24 20:18:35 -07008044 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008045 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07008046 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008047
8048 data->val_out = mii_regval;
8049
8050 return err;
8051 }
8052
8053 case SIOCSMIIREG:
8054 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8055 break; /* We have no PHY */
8056
8057 if (!capable(CAP_NET_ADMIN))
8058 return -EPERM;
8059
David S. Millerf47c11e2005-06-24 20:18:35 -07008060 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008061 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07008062 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008063
8064 return err;
8065
8066 default:
8067 /* do nothing */
8068 break;
8069 }
8070 return -EOPNOTSUPP;
8071}
8072
8073#if TG3_VLAN_TAG_USED
8074static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8075{
8076 struct tg3 *tp = netdev_priv(dev);
8077
David S. Millerf47c11e2005-06-24 20:18:35 -07008078 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008079
8080 tp->vlgrp = grp;
8081
8082 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8083 __tg3_set_rx_mode(dev);
8084
David S. Millerf47c11e2005-06-24 20:18:35 -07008085 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008086}
8087
8088static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8089{
8090 struct tg3 *tp = netdev_priv(dev);
8091
David S. Millerf47c11e2005-06-24 20:18:35 -07008092 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008093 if (tp->vlgrp)
8094 tp->vlgrp->vlan_devices[vid] = NULL;
David S. Millerf47c11e2005-06-24 20:18:35 -07008095 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096}
8097#endif
8098
David S. Miller15f98502005-05-18 22:49:26 -07008099static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8100{
8101 struct tg3 *tp = netdev_priv(dev);
8102
8103 memcpy(ec, &tp->coal, sizeof(*ec));
8104 return 0;
8105}
8106
Michael Chand244c892005-07-05 14:42:33 -07008107static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8108{
8109 struct tg3 *tp = netdev_priv(dev);
8110 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8111 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8112
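	/* On 5705 and newer chips the irq-event and statistics coalescing
	 * parameters are left unsupported here, so their limits stay at
	 * zero and any nonzero request for them fails the range check
	 * below.
	 */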
8113 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8114 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8115 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8116 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8117 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8118 }
8119
8120 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8121 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8122 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8123 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8124 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8125 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8126 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8127 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8128 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8129 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8130 return -EINVAL;
8131
8132 /* No rx interrupts will be generated if both are zero */
8133 if ((ec->rx_coalesce_usecs == 0) &&
8134 (ec->rx_max_coalesced_frames == 0))
8135 return -EINVAL;
8136
8137 /* No tx interrupts will be generated if both are zero */
8138 if ((ec->tx_coalesce_usecs == 0) &&
8139 (ec->tx_max_coalesced_frames == 0))
8140 return -EINVAL;
8141
8142 /* Only copy relevant parameters, ignore all others. */
8143 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8144 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8145 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8146 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8147 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8148 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8149 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8150 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8151 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8152
8153 if (netif_running(dev)) {
8154 tg3_full_lock(tp, 0);
8155 __tg3_set_coalesce(tp, &tp->coal);
8156 tg3_full_unlock(tp);
8157 }
8158 return 0;
8159}
8160
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161static struct ethtool_ops tg3_ethtool_ops = {
8162 .get_settings = tg3_get_settings,
8163 .set_settings = tg3_set_settings,
8164 .get_drvinfo = tg3_get_drvinfo,
8165 .get_regs_len = tg3_get_regs_len,
8166 .get_regs = tg3_get_regs,
8167 .get_wol = tg3_get_wol,
8168 .set_wol = tg3_set_wol,
8169 .get_msglevel = tg3_get_msglevel,
8170 .set_msglevel = tg3_set_msglevel,
8171 .nway_reset = tg3_nway_reset,
8172 .get_link = ethtool_op_get_link,
8173 .get_eeprom_len = tg3_get_eeprom_len,
8174 .get_eeprom = tg3_get_eeprom,
8175 .set_eeprom = tg3_set_eeprom,
8176 .get_ringparam = tg3_get_ringparam,
8177 .set_ringparam = tg3_set_ringparam,
8178 .get_pauseparam = tg3_get_pauseparam,
8179 .set_pauseparam = tg3_set_pauseparam,
8180 .get_rx_csum = tg3_get_rx_csum,
8181 .set_rx_csum = tg3_set_rx_csum,
8182 .get_tx_csum = ethtool_op_get_tx_csum,
8183 .set_tx_csum = tg3_set_tx_csum,
8184 .get_sg = ethtool_op_get_sg,
8185 .set_sg = ethtool_op_set_sg,
8186#if TG3_TSO_SUPPORT != 0
8187 .get_tso = ethtool_op_get_tso,
8188 .set_tso = tg3_set_tso,
8189#endif
Michael Chan4cafd3f2005-05-29 14:56:34 -07008190 .self_test_count = tg3_get_test_count,
8191 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008192 .get_strings = tg3_get_strings,
8193 .get_stats_count = tg3_get_stats_count,
8194 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07008195 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07008196 .set_coalesce = tg3_set_coalesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008197};
8198
8199static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8200{
8201 u32 cursize, val;
8202
8203 tp->nvram_size = EEPROM_CHIP_SIZE;
8204
8205 if (tg3_nvram_read(tp, 0, &val) != 0)
8206 return;
8207
8208 if (swab32(val) != TG3_EEPROM_MAGIC)
8209 return;
8210
8211 /*
8212 * Size the chip by reading offsets at increasing powers of two.
8213 * When we encounter our validation signature, we know the addressing
8214 * has wrapped around, and thus have our chip size.
8215 */
8216 cursize = 0x800;
8217
8218 while (cursize < tp->nvram_size) {
8219 if (tg3_nvram_read(tp, cursize, &val) != 0)
8220 return;
8221
8222 if (swab32(val) == TG3_EEPROM_MAGIC)
8223 break;
8224
8225 cursize <<= 1;
8226 }
8227
8228 tp->nvram_size = cursize;
8229}
8230
8231static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8232{
8233 u32 val;
8234
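	/* The upper 16 bits of the word at NVRAM offset 0xf0 encode the
	 * size in kilobytes; fall back to 128KB (0x20000) if the word is
	 * zero or cannot be read.
	 */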
8235 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8236 if (val != 0) {
8237 tp->nvram_size = (val >> 16) * 1024;
8238 return;
8239 }
8240 }
8241 tp->nvram_size = 0x20000;
8242}
8243
8244static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8245{
8246 u32 nvcfg1;
8247
8248 nvcfg1 = tr32(NVRAM_CFG1);
8249 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8250 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8251 }
8252 else {
8253 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8254 tw32(NVRAM_CFG1, nvcfg1);
8255 }
8256
Michael Chan85e94ce2005-04-21 17:05:28 -07008257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008258 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8259 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8260 tp->nvram_jedecnum = JEDEC_ATMEL;
8261 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8262 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8263 break;
8264 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8265 tp->nvram_jedecnum = JEDEC_ATMEL;
8266 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8267 break;
8268 case FLASH_VENDOR_ATMEL_EEPROM:
8269 tp->nvram_jedecnum = JEDEC_ATMEL;
8270 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8271 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8272 break;
8273 case FLASH_VENDOR_ST:
8274 tp->nvram_jedecnum = JEDEC_ST;
8275 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8276 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8277 break;
8278 case FLASH_VENDOR_SAIFUN:
8279 tp->nvram_jedecnum = JEDEC_SAIFUN;
8280 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8281 break;
8282 case FLASH_VENDOR_SST_SMALL:
8283 case FLASH_VENDOR_SST_LARGE:
8284 tp->nvram_jedecnum = JEDEC_SST;
8285 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8286 break;
8287 }
8288 }
8289 else {
8290 tp->nvram_jedecnum = JEDEC_ATMEL;
8291 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8292 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8293 }
8294}
8295
Michael Chan361b4ac2005-04-21 17:11:21 -07008296static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8297{
8298 u32 nvcfg1;
8299
8300 nvcfg1 = tr32(NVRAM_CFG1);
8301
Michael Chane6af3012005-04-21 17:12:05 -07008302 /* NVRAM protection for TPM */
8303 if (nvcfg1 & (1 << 27))
8304 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8305
Michael Chan361b4ac2005-04-21 17:11:21 -07008306 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8307 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8308 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8309 tp->nvram_jedecnum = JEDEC_ATMEL;
8310 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8311 break;
8312 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8313 tp->nvram_jedecnum = JEDEC_ATMEL;
8314 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8315 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8316 break;
8317 case FLASH_5752VENDOR_ST_M45PE10:
8318 case FLASH_5752VENDOR_ST_M45PE20:
8319 case FLASH_5752VENDOR_ST_M45PE40:
8320 tp->nvram_jedecnum = JEDEC_ST;
8321 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8322 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8323 break;
8324 }
8325
8326 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8327 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8328 case FLASH_5752PAGE_SIZE_256:
8329 tp->nvram_pagesize = 256;
8330 break;
8331 case FLASH_5752PAGE_SIZE_512:
8332 tp->nvram_pagesize = 512;
8333 break;
8334 case FLASH_5752PAGE_SIZE_1K:
8335 tp->nvram_pagesize = 1024;
8336 break;
8337 case FLASH_5752PAGE_SIZE_2K:
8338 tp->nvram_pagesize = 2048;
8339 break;
8340 case FLASH_5752PAGE_SIZE_4K:
8341 tp->nvram_pagesize = 4096;
8342 break;
8343 case FLASH_5752PAGE_SIZE_264:
8344 tp->nvram_pagesize = 264;
8345 break;
8346 }
8347 }
8348 else {
8349 /* For eeprom, set pagesize to maximum eeprom size */
8350 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8351
8352 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8353 tw32(NVRAM_CFG1, nvcfg1);
8354 }
8355}
8356
Linus Torvalds1da177e2005-04-16 15:20:36 -07008357/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8358static void __devinit tg3_nvram_init(struct tg3 *tp)
8359{
8360 int j;
8361
8362 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8363 return;
8364
8365 tw32_f(GRC_EEPROM_ADDR,
8366 (EEPROM_ADDR_FSM_RESET |
8367 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8368 EEPROM_ADDR_CLKPERD_SHIFT)));
8369
8370 /* XXX schedule_timeout() ... */
8371 for (j = 0; j < 100; j++)
8372 udelay(10);
8373
8374 /* Enable seeprom accesses. */
8375 tw32_f(GRC_LOCAL_CTRL,
8376 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8377 udelay(100);
8378
8379 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8380 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8381 tp->tg3_flags |= TG3_FLAG_NVRAM;
8382
Michael Chane6af3012005-04-21 17:12:05 -07008383 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008384
Michael Chan361b4ac2005-04-21 17:11:21 -07008385 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8386 tg3_get_5752_nvram_info(tp);
8387 else
8388 tg3_get_nvram_info(tp);
8389
Linus Torvalds1da177e2005-04-16 15:20:36 -07008390 tg3_get_nvram_size(tp);
8391
Michael Chane6af3012005-04-21 17:12:05 -07008392 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008393
8394 } else {
8395 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8396
8397 tg3_get_eeprom_size(tp);
8398 }
8399}
8400
8401static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8402 u32 offset, u32 *val)
8403{
8404 u32 tmp;
8405 int i;
8406
8407 if (offset > EEPROM_ADDR_ADDR_MASK ||
8408 (offset % 4) != 0)
8409 return -EINVAL;
8410
8411 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8412 EEPROM_ADDR_DEVID_MASK |
8413 EEPROM_ADDR_READ);
8414 tw32(GRC_EEPROM_ADDR,
8415 tmp |
8416 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8417 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8418 EEPROM_ADDR_ADDR_MASK) |
8419 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8420
8421 for (i = 0; i < 10000; i++) {
8422 tmp = tr32(GRC_EEPROM_ADDR);
8423
8424 if (tmp & EEPROM_ADDR_COMPLETE)
8425 break;
8426 udelay(100);
8427 }
8428 if (!(tmp & EEPROM_ADDR_COMPLETE))
8429 return -EBUSY;
8430
8431 *val = tr32(GRC_EEPROM_DATA);
8432 return 0;
8433}
8434
8435#define NVRAM_CMD_TIMEOUT 10000
8436
8437static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8438{
8439 int i;
8440
8441 tw32(NVRAM_CMD, nvram_cmd);
8442 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8443 udelay(10);
8444 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8445 udelay(10);
8446 break;
8447 }
8448 }
8449 if (i == NVRAM_CMD_TIMEOUT) {
8450 return -EBUSY;
8451 }
8452 return 0;
8453}
8454
8455static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8456{
8457 int ret;
8458
8459 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8460 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8461 return -EINVAL;
8462 }
8463
8464 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8465 return tg3_nvram_read_using_eeprom(tp, offset, val);
8466
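	/* Atmel AT45DB buffered flash is not linearly addressed: the page
	 * index lives in the upper address bits (starting at
	 * ATMEL_AT45DB0X1B_PAGE_POS) and the byte offset within the page
	 * stays in the low bits, so translate the linear offset here.
	 */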
8467 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8468 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8469 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8470
8471 offset = ((offset / tp->nvram_pagesize) <<
8472 ATMEL_AT45DB0X1B_PAGE_POS) +
8473 (offset % tp->nvram_pagesize);
8474 }
8475
8476 if (offset > NVRAM_ADDR_MSK)
8477 return -EINVAL;
8478
8479 tg3_nvram_lock(tp);
8480
Michael Chane6af3012005-04-21 17:12:05 -07008481 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008482
8483 tw32(NVRAM_ADDR, offset);
8484 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8485 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8486
8487 if (ret == 0)
8488 *val = swab32(tr32(NVRAM_RDDATA));
8489
8490 tg3_nvram_unlock(tp);
8491
Michael Chane6af3012005-04-21 17:12:05 -07008492 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008493
8494 return ret;
8495}
8496
8497static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8498 u32 offset, u32 len, u8 *buf)
8499{
8500 int i, j, rc = 0;
8501 u32 val;
8502
8503 for (i = 0; i < len; i += 4) {
8504 u32 addr, data;
8505
8506 addr = offset + i;
8507
8508 memcpy(&data, buf + i, 4);
8509
8510 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8511
8512 val = tr32(GRC_EEPROM_ADDR);
8513 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8514
8515 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8516 EEPROM_ADDR_READ);
8517 tw32(GRC_EEPROM_ADDR, val |
8518 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8519 (addr & EEPROM_ADDR_ADDR_MASK) |
8520 EEPROM_ADDR_START |
8521 EEPROM_ADDR_WRITE);
8522
8523 for (j = 0; j < 10000; j++) {
8524 val = tr32(GRC_EEPROM_ADDR);
8525
8526 if (val & EEPROM_ADDR_COMPLETE)
8527 break;
8528 udelay(100);
8529 }
8530 if (!(val & EEPROM_ADDR_COMPLETE)) {
8531 rc = -EBUSY;
8532 break;
8533 }
8534 }
8535
8536 return rc;
8537}
8538
8539/* offset and length are dword aligned */
8540static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8541 u8 *buf)
8542{
8543 int ret = 0;
8544 u32 pagesize = tp->nvram_pagesize;
8545 u32 pagemask = pagesize - 1;
8546 u32 nvram_cmd;
8547 u8 *tmp;
8548
8549 tmp = kmalloc(pagesize, GFP_KERNEL);
8550 if (tmp == NULL)
8551 return -ENOMEM;
8552
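	/* Unbuffered flash can only be programmed a full page at a time:
	 * for each page touched, read it back, merge in the caller's data,
	 * issue write-enable plus page erase, then stream the page out
	 * word by word with FIRST/LAST framing on the NVRAM command.
	 */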
8553 while (len) {
8554 int j;
Michael Chane6af3012005-04-21 17:12:05 -07008555 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008556
8557 phy_addr = offset & ~pagemask;
8558
8559 for (j = 0; j < pagesize; j += 4) {
8560 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8561 (u32 *) (tmp + j))))
8562 break;
8563 }
8564 if (ret)
8565 break;
8566
8567 page_off = offset & pagemask;
8568 size = pagesize;
8569 if (len < size)
8570 size = len;
8571
8572 len -= size;
8573
8574 memcpy(tmp + page_off, buf, size);
8575
8576 offset = offset + (pagesize - page_off);
8577
Michael Chane6af3012005-04-21 17:12:05 -07008578 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008579
8580 /*
8581 * Before we can erase the flash page, we need
8582 * to issue a special "write enable" command.
8583 */
8584 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8585
8586 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8587 break;
8588
8589 /* Erase the target page */
8590 tw32(NVRAM_ADDR, phy_addr);
8591
8592 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8593 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8594
8595 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8596 break;
8597
8598 /* Issue another write enable to start the write. */
8599 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8600
8601 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8602 break;
8603
8604 for (j = 0; j < pagesize; j += 4) {
8605 u32 data;
8606
8607 data = *((u32 *) (tmp + j));
8608 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8609
8610 tw32(NVRAM_ADDR, phy_addr + j);
8611
8612 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8613 NVRAM_CMD_WR;
8614
8615 if (j == 0)
8616 nvram_cmd |= NVRAM_CMD_FIRST;
8617 else if (j == (pagesize - 4))
8618 nvram_cmd |= NVRAM_CMD_LAST;
8619
8620 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8621 break;
8622 }
8623 if (ret)
8624 break;
8625 }
8626
8627 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8628 tg3_nvram_exec_cmd(tp, nvram_cmd);
8629
8630 kfree(tmp);
8631
8632 return ret;
8633}
8634
8635/* offset and length are dword aligned */
8636static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8637 u8 *buf)
8638{
8639 int i, ret = 0;
8640
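	/* Buffered parts take word writes directly; mark the first word of
	 * each flash page (and of the request) with NVRAM_CMD_FIRST and the
	 * last word of a page or of the request with NVRAM_CMD_LAST so the
	 * controller can frame the burst correctly.
	 */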
8641 for (i = 0; i < len; i += 4, offset += 4) {
8642 u32 data, page_off, phy_addr, nvram_cmd;
8643
8644 memcpy(&data, buf + i, 4);
8645 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8646
8647 page_off = offset % tp->nvram_pagesize;
8648
8649 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8650 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8651
8652 phy_addr = ((offset / tp->nvram_pagesize) <<
8653 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8654 }
8655 else {
8656 phy_addr = offset;
8657 }
8658
8659 tw32(NVRAM_ADDR, phy_addr);
8660
8661 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8662
8663 if ((page_off == 0) || (i == 0))
8664 nvram_cmd |= NVRAM_CMD_FIRST;
8665 else if (page_off == (tp->nvram_pagesize - 4))
8666 nvram_cmd |= NVRAM_CMD_LAST;
8667
8668 if (i == (len - 4))
8669 nvram_cmd |= NVRAM_CMD_LAST;
8670
8671 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8672 (nvram_cmd & NVRAM_CMD_FIRST)) {
8673
8674 if ((ret = tg3_nvram_exec_cmd(tp,
8675 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8676 NVRAM_CMD_DONE)))
8677
8678 break;
8679 }
8680 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8681 /* We always do complete word writes to eeprom. */
8682 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8683 }
8684
8685 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8686 break;
8687 }
8688 return ret;
8689}
8690
8691/* offset and length are dword aligned */
8692static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8693{
8694 int ret;
8695
8696 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8697 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8698 return -EINVAL;
8699 }
8700
8701 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07008702 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8703 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008704 udelay(40);
8705 }
8706
8707 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8708 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8709 }
8710 else {
8711 u32 grc_mode;
8712
8713 tg3_nvram_lock(tp);
8714
Michael Chane6af3012005-04-21 17:12:05 -07008715 tg3_enable_nvram_access(tp);
8716 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8717 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008718 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008719
8720 grc_mode = tr32(GRC_MODE);
8721 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8722
8723 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8724 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8725
8726 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8727 buf);
8728 }
8729 else {
8730 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8731 buf);
8732 }
8733
8734 grc_mode = tr32(GRC_MODE);
8735 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8736
Michael Chane6af3012005-04-21 17:12:05 -07008737 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738 tg3_nvram_unlock(tp);
8739 }
8740
8741 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07008742 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008743 udelay(40);
8744 }
8745
8746 return ret;
8747}
8748
8749struct subsys_tbl_ent {
8750 u16 subsys_vendor, subsys_devid;
8751 u32 phy_id;
8752};
8753
8754static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8755 /* Broadcom boards. */
8756 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8757 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8758 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8759 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
8760 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8761 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8762 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
8763 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8764 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8765 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8766 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8767
8768 /* 3com boards. */
8769 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8770 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8771 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
8772 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8773 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8774
8775 /* DELL boards. */
8776 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8777 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8778 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8779 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8780
8781 /* Compaq boards. */
8782 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8783 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8784 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
8785 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8786 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8787
8788 /* IBM boards. */
8789 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8790};
8791
8792static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8793{
8794 int i;
8795
8796 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8797 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8798 tp->pdev->subsystem_vendor) &&
8799 (subsys_id_to_phy_id[i].subsys_devid ==
8800 tp->pdev->subsystem_device))
8801 return &subsys_id_to_phy_id[i];
8802 }
8803 return NULL;
8804}
8805
Michael Chan7d0c41e2005-04-21 17:06:20 -07008806/* Since this function may be called in D3-hot power state during
8807 * tg3_init_one(), only config cycles are allowed.
8808 */
8809static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008810{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008811 u32 val;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008812
8813 /* Make sure register accesses (indirect or otherwise)
8814 * will function correctly.
8815 */
8816 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8817 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008818
8819 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008820 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8821
Linus Torvalds1da177e2005-04-16 15:20:36 -07008822 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8823 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8824 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008825 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8826 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008827
8828 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8829 tp->nic_sram_data_cfg = nic_cfg;
8830
8831 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8832 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8833 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8834 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8835 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8836 (ver > 0) && (ver < 0x100))
8837 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8838
Linus Torvalds1da177e2005-04-16 15:20:36 -07008839 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8840 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8841 eeprom_phy_serdes = 1;
8842
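		/* The PHY ID stored in NIC SRAM is split across the two
		 * halves of this word; reassemble it into the same layout
		 * that tg3_phy_probe() builds from the MII_PHYSID1 and
		 * MII_PHYSID2 registers.
		 */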
8843 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8844 if (nic_phy_id != 0) {
8845 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8846 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8847
8848 eeprom_phy_id = (id1 >> 16) << 10;
8849 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8850 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8851 } else
8852 eeprom_phy_id = 0;
8853
Michael Chan7d0c41e2005-04-21 17:06:20 -07008854 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -07008855 if (eeprom_phy_serdes) {
8856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8857 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8858 else
8859 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8860 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07008861
John W. Linvillecbf46852005-04-21 17:01:29 -07008862 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008863 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8864 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -07008865 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008866 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8867
8868 switch (led_cfg) {
8869 default:
8870 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8871 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8872 break;
8873
8874 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8875 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8876 break;
8877
8878 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8879 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -07008880
8881 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8882 * read on some older 5700/5701 bootcode.
8883 */
8884 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8885 ASIC_REV_5700 ||
8886 GET_ASIC_REV(tp->pci_chip_rev_id) ==
8887 ASIC_REV_5701)
8888 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8889
Linus Torvalds1da177e2005-04-16 15:20:36 -07008890 break;
8891
8892 case SHASTA_EXT_LED_SHARED:
8893 tp->led_ctrl = LED_CTRL_MODE_SHARED;
8894 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8895 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8896 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8897 LED_CTRL_MODE_PHY_2);
8898 break;
8899
8900 case SHASTA_EXT_LED_MAC:
8901 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8902 break;
8903
8904 case SHASTA_EXT_LED_COMBO:
8905 tp->led_ctrl = LED_CTRL_MODE_COMBO;
8906 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8907 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8908 LED_CTRL_MODE_PHY_2);
8909 break;
8910
8911 };
8912
8913 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8915 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8916 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8917
8918 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8919 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8920 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8921 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8922
8923 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8924 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07008925 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008926 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8927 }
8928 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8929 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8930
8931 if (cfg2 & (1 << 17))
8932 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8933
8934 /* serdes signal pre-emphasis in register 0x590 set by */
8935 /* bootcode if bit 18 is set */
8936 if (cfg2 & (1 << 18))
8937 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8938 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07008939}
8940
8941static int __devinit tg3_phy_probe(struct tg3 *tp)
8942{
8943 u32 hw_phy_id_1, hw_phy_id_2;
8944 u32 hw_phy_id, hw_phy_id_masked;
8945 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946
8947 /* Reading the PHY ID register can conflict with ASF
 8948	 * firmware access to the PHY hardware.
8949 */
8950 err = 0;
8951 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8952 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8953 } else {
8954 /* Now read the physical PHY_ID from the chip and verify
8955 * that it is sane. If it doesn't look good, we fall back
 8956		 * to either the hard-coded table-based PHY_ID or, failing
 8957		 * that, the value found in the eeprom area.
8958 */
8959 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8960 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8961
8962 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
8963 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8964 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
8965
8966 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8967 }
8968
8969 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8970 tp->phy_id = hw_phy_id;
8971 if (hw_phy_id_masked == PHY_ID_BCM8002)
8972 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -07008973 else
8974 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008975 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -07008976 if (tp->phy_id != PHY_ID_INVALID) {
8977 /* Do nothing, phy ID already set up in
8978 * tg3_get_eeprom_hw_cfg().
8979 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008980 } else {
8981 struct subsys_tbl_ent *p;
8982
8983 /* No eeprom signature? Try the hardcoded
8984 * subsys device table.
8985 */
8986 p = lookup_by_subsys(tp);
8987 if (!p)
8988 return -ENODEV;
8989
8990 tp->phy_id = p->phy_id;
8991 if (!tp->phy_id ||
8992 tp->phy_id == PHY_ID_BCM8002)
8993 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8994 }
8995 }
8996
Michael Chan747e8f82005-07-25 12:33:22 -07008997 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07008998 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8999 u32 bmsr, adv_reg, tg3_ctrl;
9000
9001 tg3_readphy(tp, MII_BMSR, &bmsr);
9002 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9003 (bmsr & BMSR_LSTATUS))
9004 goto skip_phy_reset;
9005
9006 err = tg3_phy_reset(tp);
9007 if (err)
9008 return err;
9009
9010 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9011 ADVERTISE_100HALF | ADVERTISE_100FULL |
9012 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9013 tg3_ctrl = 0;
9014 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9015 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9016 MII_TG3_CTRL_ADV_1000_FULL);
9017 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9018 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9019 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9020 MII_TG3_CTRL_ENABLE_AS_MASTER);
9021 }
9022
9023 if (!tg3_copper_is_advertising_all(tp)) {
9024 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9025
9026 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9027 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9028
9029 tg3_writephy(tp, MII_BMCR,
9030 BMCR_ANENABLE | BMCR_ANRESTART);
9031 }
9032 tg3_phy_set_wirespeed(tp);
9033
9034 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9035 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9036 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9037 }
9038
9039skip_phy_reset:
9040 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9041 err = tg3_init_5401phy_dsp(tp);
9042 if (err)
9043 return err;
9044 }
9045
9046 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9047 err = tg3_init_5401phy_dsp(tp);
9048 }
9049
Michael Chan747e8f82005-07-25 12:33:22 -07009050 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009051 tp->link_config.advertising =
9052 (ADVERTISED_1000baseT_Half |
9053 ADVERTISED_1000baseT_Full |
9054 ADVERTISED_Autoneg |
9055 ADVERTISED_FIBRE);
9056 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9057 tp->link_config.advertising &=
9058 ~(ADVERTISED_1000baseT_Half |
9059 ADVERTISED_1000baseT_Full);
9060
9061 return err;
9062}
9063
9064static void __devinit tg3_read_partno(struct tg3 *tp)
9065{
9066 unsigned char vpd_data[256];
9067 int i;
9068
9069 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9070 /* Sun decided not to put the necessary bits in the
9071 * NVRAM of their onboard tg3 parts :(
9072 */
9073 strcpy(tp->board_part_number, "Sun 570X");
9074 return;
9075 }
9076
9077 for (i = 0; i < 256; i += 4) {
9078 u32 tmp;
9079
9080 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9081 goto out_not_found;
9082
9083 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9084 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9085 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9086 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9087 }
9088
9089 /* Now parse and find the part number. */
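	/* This is a minimal PCI VPD walk: 0x82 (identifier string) and 0x91
	 * (read/write) tags are skipped using their two-byte little-endian
	 * length, and 0x90 marks the read-only block whose "PN" keyword
	 * carries the board part number.
	 */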
9090 for (i = 0; i < 256; ) {
9091 unsigned char val = vpd_data[i];
9092 int block_end;
9093
9094 if (val == 0x82 || val == 0x91) {
9095 i = (i + 3 +
9096 (vpd_data[i + 1] +
9097 (vpd_data[i + 2] << 8)));
9098 continue;
9099 }
9100
9101 if (val != 0x90)
9102 goto out_not_found;
9103
9104 block_end = (i + 3 +
9105 (vpd_data[i + 1] +
9106 (vpd_data[i + 2] << 8)));
9107 i += 3;
9108 while (i < block_end) {
9109 if (vpd_data[i + 0] == 'P' &&
9110 vpd_data[i + 1] == 'N') {
9111 int partno_len = vpd_data[i + 2];
9112
9113 if (partno_len > 24)
9114 goto out_not_found;
9115
9116 memcpy(tp->board_part_number,
9117 &vpd_data[i + 3],
9118 partno_len);
9119
9120 /* Success. */
9121 return;
9122 }
9123 }
9124
9125 /* Part number not found. */
9126 goto out_not_found;
9127 }
9128
9129out_not_found:
9130 strcpy(tp->board_part_number, "none");
9131}
9132
9133#ifdef CONFIG_SPARC64
9134static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9135{
9136 struct pci_dev *pdev = tp->pdev;
9137 struct pcidev_cookie *pcp = pdev->sysdata;
9138
9139 if (pcp != NULL) {
9140 int node = pcp->prom_node;
9141 u32 venid;
9142 int err;
9143
9144 err = prom_getproperty(node, "subsystem-vendor-id",
9145 (char *) &venid, sizeof(venid));
9146 if (err == 0 || err == -1)
9147 return 0;
9148 if (venid == PCI_VENDOR_ID_SUN)
9149 return 1;
9150 }
9151 return 0;
9152}
9153#endif
9154
9155static int __devinit tg3_get_invariants(struct tg3 *tp)
9156{
9157 static struct pci_device_id write_reorder_chipsets[] = {
9158 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159 PCI_DEVICE_ID_INTEL_82801AA_8) },
9160 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9161 PCI_DEVICE_ID_INTEL_82801AB_8) },
9162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163 PCI_DEVICE_ID_INTEL_82801BA_11) },
9164 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9165 PCI_DEVICE_ID_INTEL_82801BA_6) },
9166 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9167 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9168 { },
9169 };
9170 u32 misc_ctrl_reg;
9171 u32 cacheline_sz_reg;
9172 u32 pci_state_reg, grc_misc_cfg;
9173 u32 val;
9174 u16 pci_cmd;
9175 int err;
9176
9177#ifdef CONFIG_SPARC64
9178 if (tg3_is_sun_570X(tp))
9179 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9180#endif
9181
9182 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
9183 * reordering to the mailbox registers done by the host
9184 * controller can cause major troubles. We read back from
9185 * every mailbox register write to force the writes to be
9186 * posted to the chip in order.
9187 */
9188 if (pci_dev_present(write_reorder_chipsets))
9189 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9190
9191 /* Force memory write invalidate off. If we leave it on,
9192 * then on 5700_BX chips we have to enable a workaround.
9193 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 9194	 * to match the cacheline size. The Broadcom driver has this
 9195	 * workaround but turns MWI off all the time and so never uses
 9196	 * it. This seems to suggest that the workaround is insufficient.
9197 */
9198 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9199 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9200 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9201
9202 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9203 * has the register indirect write enable bit set before
9204 * we try to access any of the MMIO registers. It is also
9205 * critical that the PCI-X hw workaround situation is decided
9206 * before that as well.
9207 */
9208 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9209 &misc_ctrl_reg);
9210
9211 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9212 MISC_HOST_CTRL_CHIPREV_SHIFT);
9213
Michael Chanff645be2005-04-21 17:09:53 -07009214 /* Wrong chip ID in 5752 A0. This code can be removed later
9215 * as A0 is not in production.
9216 */
9217 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9218 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9219
Michael Chan4cf78e42005-07-25 12:29:19 -07009220 /* Find msi capability. */
9221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9222 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9223
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224 /* Initialize misc host control in PCI block. */
9225 tp->misc_host_ctrl |= (misc_ctrl_reg &
9226 MISC_HOST_CTRL_CHIPREV);
9227 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9228 tp->misc_host_ctrl);
9229
9230 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9231 &cacheline_sz_reg);
9232
9233 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9234 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9235 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9236 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9237
John W. Linville2052da92005-04-21 16:56:08 -07009238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -07009239 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
John W. Linville6708e5c2005-04-21 17:00:52 -07009241 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9242
John W. Linville1b440c562005-04-21 17:03:18 -07009243 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9244 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9245 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9246
John W. Linvillebb7064d2005-04-21 17:02:41 -07009247 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009248 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9249
Michael Chan0f893dc2005-07-25 12:30:38 -07009250 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9251 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9252 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9253 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9254
Linus Torvalds1da177e2005-04-16 15:20:36 -07009255 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9256 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9257
9258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9259 tp->pci_lat_timer < 64) {
9260 tp->pci_lat_timer = 64;
9261
9262 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9263 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9264 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9265 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9266
9267 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9268 cacheline_sz_reg);
9269 }
9270
9271 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9272 &pci_state_reg);
9273
9274 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9275 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9276
9277 /* If this is a 5700 BX chipset, and we are in PCI-X
9278 * mode, enable register write workaround.
9279 *
9280 * The workaround is to use indirect register accesses
9281 * for all chip writes not to mailbox registers.
9282 */
9283 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9284 u32 pm_reg;
9285 u16 pci_cmd;
9286
9287 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9288
 9289			/* The chip can have its power management PCI config
9290 * space registers clobbered due to this bug.
9291 * So explicitly force the chip into D0 here.
9292 */
9293 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9294 &pm_reg);
9295 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9296 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9297 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9298 pm_reg);
9299
9300 /* Also, force SERR#/PERR# in PCI command. */
9301 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9302 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9303 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9304 }
9305 }
9306
9307 /* Back to back register writes can cause problems on this chip,
9308 * the workaround is to read back all reg writes except those to
9309 * mailbox regs. See tg3_write_indirect_reg32().
9310 *
9311 * PCI Express 5750_A0 rev chips need this workaround too.
9312 */
9313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9314 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9315 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9316 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9317
9318 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9319 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9320 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9321 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9322
9323 /* Chip-specific fixup from Broadcom driver */
9324 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9325 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9326 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9327 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9328 }
9329
Michael Chan7d0c41e2005-04-21 17:06:20 -07009330 /* Get eeprom hw config before calling tg3_set_power_state().
9331 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9332 * determined before calling tg3_set_power_state() so that
9333 * we know whether or not to switch out of Vaux power.
9334 * When the flag is set, it means that GPIO1 is used for eeprom
9335 * write protect and also implies that it is a LOM where GPIOs
9336 * are not used to switch power.
9337 */
9338 tg3_get_eeprom_hw_cfg(tp);
9339
Michael Chan314fba32005-04-21 17:07:04 -07009340 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9341 * GPIO1 driven high will bring 5700's external PHY out of reset.
9342 * It is also used as eeprom write protect on LOMs.
9343 */
9344 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9345 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9346 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9347 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9348 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -07009349 /* Unused GPIO3 must be driven as output on 5752 because there
9350 * are no pull-up resistors on unused GPIO pins.
9351 */
9352 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9353 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -07009354
Linus Torvalds1da177e2005-04-16 15:20:36 -07009355 /* Force the chip into D0. */
9356 err = tg3_set_power_state(tp, 0);
9357 if (err) {
9358 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9359 pci_name(tp->pdev));
9360 return err;
9361 }
9362
9363 /* 5700 B0 chips do not support checksumming correctly due
9364 * to hardware bugs.
9365 */
9366 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9367 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9368
9369 /* Pseudo-header checksum is done by hardware logic and not
 9370	 * the offload processors, so make the chip do the pseudo-
9371 * header checksums on receive. For transmit it is more
9372 * convenient to do the pseudo-header checksum in software
9373 * as Linux does that on transmit for us in all cases.
9374 */
9375 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9376 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9377
9378 /* Derive initial jumbo mode from MTU assigned in
9379 * ether_setup() via the alloc_etherdev() call
9380 */
Michael Chan0f893dc2005-07-25 12:30:38 -07009381 if (tp->dev->mtu > ETH_DATA_LEN &&
9382 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9383 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009384
9385 /* Determine WakeOnLan speed to use. */
9386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9387 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9388 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9389 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9390 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9391 } else {
9392 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9393 }
9394
9395 /* A few boards don't want Ethernet@WireSpeed phy feature */
9396 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9397 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9398 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -07009399 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9400 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009401 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9402
9403 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9404 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9405 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9406 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9407 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9408
John W. Linvillebb7064d2005-04-21 17:02:41 -07009409 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009410 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9411
Linus Torvalds1da177e2005-04-16 15:20:36 -07009412 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009413 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9414 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9415 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9416
9417 /* Initialize MAC MI mode, polling disabled. */
9418 tw32_f(MAC_MI_MODE, tp->mi_mode);
9419 udelay(80);
9420
9421 /* Initialize data/descriptor byte/word swapping. */
9422 val = tr32(GRC_MODE);
9423 val &= GRC_MODE_HOST_STACKUP;
9424 tw32(GRC_MODE, val | tp->grc_mode);
9425
9426 tg3_switch_clocks(tp);
9427
9428 /* Clear this out for sanity. */
9429 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9430
9431 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9432 &pci_state_reg);
9433 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9434 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9435 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9436
9437 if (chiprevid == CHIPREV_ID_5701_A0 ||
9438 chiprevid == CHIPREV_ID_5701_B0 ||
9439 chiprevid == CHIPREV_ID_5701_B2 ||
9440 chiprevid == CHIPREV_ID_5701_B5) {
9441 void __iomem *sram_base;
9442
9443 /* Write some dummy words into the SRAM status block
9444 * area, see if it reads back correctly. If the return
9445 * value is bad, force enable the PCIX workaround.
9446 */
9447 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9448
9449 writel(0x00000000, sram_base);
9450 writel(0x00000000, sram_base + 4);
9451 writel(0xffffffff, sram_base + 4);
9452 if (readl(sram_base) != 0x00000000)
9453 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9454 }
9455 }
9456
9457 udelay(50);
9458 tg3_nvram_init(tp);
9459
9460 grc_misc_cfg = tr32(GRC_MISC_CFG);
9461 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9462
9463 /* Broadcom's driver says that CIOBE multisplit has a bug */
9464#if 0
9465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9466 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9467 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9468 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9469 }
9470#endif
9471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9472 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9473 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9474 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9475
David S. Millerfac9b832005-05-18 22:46:34 -07009476 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9477 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9478 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9479 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9480 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9481 HOSTCC_MODE_CLRTICK_TXBD);
9482
9483 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9484 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9485 tp->misc_host_ctrl);
9486 }
9487
Linus Torvalds1da177e2005-04-16 15:20:36 -07009488 /* these are limited to 10/100 only */
9489 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9490 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9491 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9492 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9493 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9494 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9495 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9496 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9497 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9498 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9499 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9500
9501 err = tg3_phy_probe(tp);
9502 if (err) {
9503 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9504 pci_name(tp->pdev), err);
9505 /* ... but do not return immediately ... */
9506 }
9507
9508 tg3_read_partno(tp);
9509
9510 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9511 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9512 } else {
9513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9514 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9515 else
9516 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9517 }
9518
9519 /* 5700 {AX,BX} chips have a broken status block link
9520 * change bit implementation, so we must use the
9521 * status register in those cases.
9522 */
9523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9524 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9525 else
9526 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9527
 9528	/* The led_ctrl is set during tg3_phy_probe; here we might
9529 * have to force the link status polling mechanism based
9530 * upon subsystem IDs.
9531 */
9532 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9533 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9534 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9535 TG3_FLAG_USE_LINKCHG_REG);
9536 }
9537
9538 /* For all SERDES we poll the MAC status register. */
9539 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9540 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9541 else
9542 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9543
9544 /* 5700 BX chips need to have their TX producer index mailboxes
 9545	 * written twice to work around a bug.
9546 */
9547 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9548 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9549 else
9550 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9551
9552 /* It seems all chips can get confused if TX buffers
9553 * straddle the 4GB address boundary in some cases.
9554 */
9555 tp->dev->hard_start_xmit = tg3_start_xmit;
9556
9557 tp->rx_offset = 2;
9558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9559 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9560 tp->rx_offset = 0;
9561
9562 /* By default, disable wake-on-lan. User can change this
9563 * using ETHTOOL_SWOL.
9564 */
9565 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9566
9567 return err;
9568}
9569
9570#ifdef CONFIG_SPARC64
9571static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9572{
9573 struct net_device *dev = tp->dev;
9574 struct pci_dev *pdev = tp->pdev;
9575 struct pcidev_cookie *pcp = pdev->sysdata;
9576
9577 if (pcp != NULL) {
9578 int node = pcp->prom_node;
9579
9580 if (prom_getproplen(node, "local-mac-address") == 6) {
9581 prom_getproperty(node, "local-mac-address",
9582 dev->dev_addr, 6);
9583 return 0;
9584 }
9585 }
9586 return -ENODEV;
9587}
9588
9589static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9590{
9591 struct net_device *dev = tp->dev;
9592
9593 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9594 return 0;
9595}
9596#endif
9597
9598static int __devinit tg3_get_device_address(struct tg3 *tp)
9599{
9600 struct net_device *dev = tp->dev;
9601 u32 hi, lo, mac_offset;
9602
9603#ifdef CONFIG_SPARC64
9604 if (!tg3_get_macaddr_sparc(tp))
9605 return 0;
9606#endif
9607
9608 mac_offset = 0x7c;
Michael Chan4cf78e42005-07-25 12:29:19 -07009609 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
 9610	     !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009612 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9613 mac_offset = 0xcc;
9614 if (tg3_nvram_lock(tp))
9615 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9616 else
9617 tg3_nvram_unlock(tp);
9618 }
9619
9620 /* First try to get it from MAC address mailbox. */
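	/* The mailbox contents are treated as valid only when the upper 16
	 * bits of the high word carry the 0x484b ("HK" in ASCII) signature.
	 */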
9621 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9622 if ((hi >> 16) == 0x484b) {
9623 dev->dev_addr[0] = (hi >> 8) & 0xff;
9624 dev->dev_addr[1] = (hi >> 0) & 0xff;
9625
9626 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9627 dev->dev_addr[2] = (lo >> 24) & 0xff;
9628 dev->dev_addr[3] = (lo >> 16) & 0xff;
9629 dev->dev_addr[4] = (lo >> 8) & 0xff;
9630 dev->dev_addr[5] = (lo >> 0) & 0xff;
9631 }
9632 /* Next, try NVRAM. */
 9633	else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9634 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9635 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9636 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9637 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9638 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9639 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9640 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9641 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9642 }
9643 /* Finally just fetch it out of the MAC control regs. */
9644 else {
9645 hi = tr32(MAC_ADDR_0_HIGH);
9646 lo = tr32(MAC_ADDR_0_LOW);
9647
9648 dev->dev_addr[5] = lo & 0xff;
9649 dev->dev_addr[4] = (lo >> 8) & 0xff;
9650 dev->dev_addr[3] = (lo >> 16) & 0xff;
9651 dev->dev_addr[2] = (lo >> 24) & 0xff;
9652 dev->dev_addr[1] = hi & 0xff;
9653 dev->dev_addr[0] = (hi >> 8) & 0xff;
9654 }
9655
9656 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9657#ifdef CONFIG_SPARC64
9658 if (!tg3_get_default_macaddr_sparc(tp))
9659 return 0;
9660#endif
9661 return -EINVAL;
9662 }
9663 return 0;
9664}
9665
David S. Miller59e6b432005-05-18 22:50:10 -07009666#define BOUNDARY_SINGLE_CACHELINE 1
9667#define BOUNDARY_MULTI_CACHELINE 2
9668
9669static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9670{
9671 int cacheline_size;
9672 u8 byte;
9673 int goal;
9674
9675 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9676 if (byte == 0)
9677 cacheline_size = 1024;
9678 else
9679 cacheline_size = (int) byte * 4;
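	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the *4;
	 * a value of zero means it was never programmed, in which case it
	 * is treated as the largest (1024-byte) boundary below.
	 */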
9680
9681 /* On 5703 and later chips, the boundary bits have no
9682 * effect.
9683 */
9684 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9685 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9686 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9687 goto out;
9688
9689#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9690 goal = BOUNDARY_MULTI_CACHELINE;
9691#else
9692#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9693 goal = BOUNDARY_SINGLE_CACHELINE;
9694#else
9695 goal = 0;
9696#endif
9697#endif
9698
9699 if (!goal)
9700 goto out;
9701
9702 /* PCI controllers on most RISC systems tend to disconnect
9703 * when a device tries to burst across a cache-line boundary.
9704 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9705 *
9706 * Unfortunately, for PCI-E there are only limited
9707 * write-side controls for this, and thus for reads
9708 * we will still get the disconnects. We'll also waste
9709 * these PCI cycles for both read and write for chips
9710 * other than 5700 and 5701 which do not implement the
9711 * boundary bits.
9712 */
9713 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9714 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9715 switch (cacheline_size) {
9716 case 16:
9717 case 32:
9718 case 64:
9719 case 128:
9720 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9721 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9722 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9723 } else {
9724 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9725 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9726 }
9727 break;
9728
9729 case 256:
9730 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9731 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9732 break;
9733
9734 default:
9735 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9736 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9737 break;
9738 };
9739 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9740 switch (cacheline_size) {
9741 case 16:
9742 case 32:
9743 case 64:
9744 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9745 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9746 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9747 break;
9748 }
9749 /* fallthrough */
9750 case 128:
9751 default:
9752 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9753 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9754 break;
9755 };
9756 } else {
9757 switch (cacheline_size) {
9758 case 16:
9759 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9760 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9761 DMA_RWCTRL_WRITE_BNDRY_16);
9762 break;
9763 }
9764 /* fallthrough */
9765 case 32:
9766 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9767 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9768 DMA_RWCTRL_WRITE_BNDRY_32);
9769 break;
9770 }
9771 /* fallthrough */
9772 case 64:
9773 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9774 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9775 DMA_RWCTRL_WRITE_BNDRY_64);
9776 break;
9777 }
9778 /* fallthrough */
9779 case 128:
9780 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9781 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9782 DMA_RWCTRL_WRITE_BNDRY_128);
9783 break;
9784 }
9785 /* fallthrough */
9786 case 256:
9787 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9788 DMA_RWCTRL_WRITE_BNDRY_256);
9789 break;
9790 case 512:
9791 val |= (DMA_RWCTRL_READ_BNDRY_512 |
9792 DMA_RWCTRL_WRITE_BNDRY_512);
9793 break;
9794 case 1024:
9795 default:
9796 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9797 DMA_RWCTRL_WRITE_BNDRY_1024);
9798 break;
9799 };
9800 }
9801
9802out:
9803 return val;
9804}
9805
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE 0x2000

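/* Choose the DMA read/write control value for this bus/chip combination
 * and, on 5700/5701 only, run a write/read DMA loop over a test pattern to
 * detect the known write-DMA corruption bug, falling back to a 16-byte
 * write boundary when corruption is observed.
 */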
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

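	/* Test loop: fill the buffer with a known pattern, DMA it to NIC
	 * SRAM, DMA it back, and verify.  A mismatch while the write
	 * boundary is still wide is treated as the 5700/5701 write bug and
	 * retried with DMA_RWCTRL_WRITE_BNDRY_16; a mismatch at the 16-byte
	 * boundary is a hard failure.
	 */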
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

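/* Establish the default link configuration: advertise all 10/100/1000
 * modes and leave speed/duplex to autonegotiation.  The orig_* fields
 * remember the requested settings while the PHY is in a low-power state.
 */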
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	netif_carrier_off(tp->dev);
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

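/* Pick the MAC buffer-manager watermarks: 5705 and newer chips
 * (TG3_FLG2_5705_PLUS) use the reduced 5705/5780 defaults, older chips
 * use the original standard and jumbo-frame defaults.
 */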
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

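/* The 5704 is a dual-port part whose two MACs appear as functions of the
 * same PCI device.  Locate the "other" function; tp->pdev_peer is consulted
 * elsewhere in the driver wherever the two ports need to coordinate.
 */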
static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	if (!peer || peer == tp->pdev)
		BUG();

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

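/* Fill in the default ethtool interrupt-coalescing parameters.  Chips with
 * the CLRTICK host-coalescing modes get slightly different tick defaults,
 * and on 5705 and newer chips the per-interrupt and statistics-block values
 * are zeroed since those controls are not used there.
 */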
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

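/* PCI probe entry point: enable and map the device, configure DMA masks,
 * allocate and populate the net_device, read chip invariants and the MAC
 * address, run the DMA test, and finally register the netdev.  Failures
 * unwind through the err_out_* labels at the bottom.
 */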
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pci_using_dac, pm_cap;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Configure DMA attributes. */
	err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (err < 0) {
			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
			       "for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

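	/* BAR 0 holds the chip's memory-mapped registers; the region is
	 * ioremap()ed once the net_device has been allocated below.
	 */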
10253 tg3reg_len = pci_resource_len(pdev, 0);
10254
10255 dev = alloc_etherdev(sizeof(*tp));
10256 if (!dev) {
10257 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10258 err = -ENOMEM;
10259 goto err_out_free_res;
10260 }
10261
10262 SET_MODULE_OWNER(dev);
10263 SET_NETDEV_DEV(dev, &pdev->dev);
10264
10265 if (pci_using_dac)
10266 dev->features |= NETIF_F_HIGHDMA;
10267 dev->features |= NETIF_F_LLTX;
10268#if TG3_VLAN_TAG_USED
10269 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10270 dev->vlan_rx_register = tg3_vlan_rx_register;
10271 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10272#endif
10273
10274 tp = netdev_priv(dev);
10275 tp->pdev = pdev;
10276 tp->dev = dev;
10277 tp->pm_cap = pm_cap;
10278 tp->mac_mode = TG3_DEF_MAC_MODE;
10279 tp->rx_mode = TG3_DEF_RX_MODE;
10280 tp->tx_mode = TG3_DEF_TX_MODE;
10281 tp->mi_mode = MAC_MI_MODE_BASE;
10282 if (tg3_debug > 0)
10283 tp->msg_enable = tg3_debug;
10284 else
10285 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10286
10287 /* The word/byte swap controls here control register access byte
10288 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10289 * setting below.
10290 */
10291 tp->misc_host_ctrl =
10292 MISC_HOST_CTRL_MASK_PCI_INT |
10293 MISC_HOST_CTRL_WORD_SWAP |
10294 MISC_HOST_CTRL_INDIR_ACCESS |
10295 MISC_HOST_CTRL_PCISTATE_RW;
10296
10297 /* The NONFRM (non-frame) byte/word swap controls take effect
10298 * on descriptor entries, anything which isn't packet data.
10299 *
10300 * The StrongARM chips on the board (one for tx, one for rx)
10301 * are running in big-endian mode.
10302 */
10303 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10304 GRC_MODE_WSWAP_NONFRM_DATA);
10305#ifdef __BIG_ENDIAN
10306 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10307#endif
10308 spin_lock_init(&tp->lock);
10309 spin_lock_init(&tp->tx_lock);
10310 spin_lock_init(&tp->indirect_lock);
10311 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10312
10313 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10314 if (tp->regs == 0UL) {
10315 printk(KERN_ERR PFX "Cannot map device registers, "
10316 "aborting.\n");
10317 err = -ENOMEM;
10318 goto err_out_free_dev;
10319 }
10320
10321 tg3_init_link_config(tp);
10322
Linus Torvalds1da177e2005-04-16 15:20:36 -070010323 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10324 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10325 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10326
10327 dev->open = tg3_open;
10328 dev->stop = tg3_close;
10329 dev->get_stats = tg3_get_stats;
10330 dev->set_multicast_list = tg3_set_rx_mode;
10331 dev->set_mac_address = tg3_set_mac_addr;
10332 dev->do_ioctl = tg3_ioctl;
10333 dev->tx_timeout = tg3_tx_timeout;
10334 dev->poll = tg3_poll;
10335 dev->ethtool_ops = &tg3_ethtool_ops;
10336 dev->weight = 64;
10337 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10338 dev->change_mtu = tg3_change_mtu;
10339 dev->irq = pdev->irq;
10340#ifdef CONFIG_NET_POLL_CONTROLLER
10341 dev->poll_controller = tg3_poll_controller;
10342#endif
10343
10344 err = tg3_get_invariants(tp);
10345 if (err) {
10346 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10347 "aborting.\n");
10348 goto err_out_iounmap;
10349 }
10350
Michael Chanfdfec1722005-07-25 12:31:48 -070010351 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010352
10353#if TG3_TSO_SUPPORT != 0
10354 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10355 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10356 }
10357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10359 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10360 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10361 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10362 } else {
10363 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10364 }
10365
10366 /* TSO is off by default, user can enable using ethtool. */
10367#if 0
10368 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10369 dev->features |= NETIF_F_TSO;
10370#endif
10371
10372#endif
10373
10374 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10375 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10376 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10377 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10378 tp->rx_pending = 63;
10379 }
10380
10381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10382 tp->pdev_peer = tg3_find_5704_peer(tp);
10383
10384 err = tg3_get_device_address(tp);
10385 if (err) {
10386 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10387 "aborting.\n");
10388 goto err_out_iounmap;
10389 }
10390
10391 /*
10392 * Reset chip in case UNDI or EFI driver did not shutdown
10393 * DMA self test will enable WDMAC and we'll see (spurious)
10394 * pending DMA on the PCI bus at that point.
10395 */
10396 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10397 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10398 pci_save_state(tp->pdev);
10399 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Michael Chan944d9802005-05-29 14:57:48 -070010400 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010401 }
10402
	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		dev->features &= ~NETIF_F_HIGHDMA;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully set up the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
	       ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
	       ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
	       dev->name, tp->dma_rwctrl);

	return 0;

err_out_iounmap:
	iounmap(tp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		unregister_netdev(dev);
		iounmap(tp->regs);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

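/* Suspend: stop the interface and timer, disable interrupts, halt the chip,
 * and enter the PCI power state chosen by the core.  If the power-state
 * change fails, the device is brought back up so it stays usable.
 */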
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		tg3_full_lock(tp, 0);

		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}

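/* Resume: restore PCI config space, return the chip to full power,
 * re-initialize the hardware, and restart the timer and interface.
 */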
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);