/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.48"
#define DRV_MODULE_RELDATE	"Jan 16, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)						\
	((TP)->tx_pending -						\
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
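/* Illustrative note (not in the original sources): because the ring sizes
 * above are powers of two, wrap-around arithmetic reduces to a mask.  With
 * TG3_TX_RING_SIZE == 512, for example,
 *
 *	next = (prod + 1) % 512;
 *	next = (prod + 1) & (512 - 1);
 *
 * compute the same value, which is the identity NEXT_TX() and
 * TX_BUFFS_AVAIL() depend on.
 */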

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
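/* Usage sketch (illustrative, not from the original sources): the value is a
 * bitmask of NETIF_MSG_* flags from <linux/netdevice.h>.  Assuming the usual
 * bit values of 0x1 (DRV), 0x2 (PROBE) and 0x4 (LINK), something like
 *
 *	modprobe tg3 tg3_debug=0x7
 *
 * would enable those three message classes, while the default of -1 falls
 * back to TG3_DEF_MSG_ENABLE above.
 */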

static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
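/* Note (added commentary): these string tables are what the driver hands back
 * to ethtool; the stats keys appear in `ethtool -S <dev>` output and the test
 * keys label the `ethtool -t <dev>` self-test results, so their order is
 * expected to match the counters and tests filled in elsewhere in the driver.
 */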

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
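/* For reference (added commentary): callers later in this file pass a 40 usec
 * wait when reprogramming TG3PCI_CLOCK_CTRL and 100 usec when toggling the
 * GRC_LOCAL_CTRL GPIOs, so the settle time is honored both on the posted
 * (write plus readback) path and on the non-posted workaround path.
 */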

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
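/* Usage sketch (illustrative, not from the original sources): all MMIO in this
 * driver funnels through the tp->read32/tp->write32 hooks, so chips that need
 * the indirect or non-posted workarounds can substitute implementations, e.g.:
 *
 *	tw32(MAC_MODE, tp->mac_mode);		   plain write
 *	tw32_f(MAC_MODE, tp->mac_mode);		   write, then read back to flush
 *	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);	   flush plus 100 usec settle time
 *	val = tr32(TG3PCI_CLOCK_CTRL);		   read through tp->read32
 */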

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
	/* If no workaround is needed, write to mem space directly */
	if (tp->write32 != tg3_write_indirect_reg32)
		tw32(NIC_SRAM_WIN_BASE + off, val);
	else
		tg3_write_mem(tp, off, val);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
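/* Note (added commentary): with TG3_FLAG_TAGGED_STATUS the hardware tags each
 * status block update, and writing that tag back to MAILBOX_INTERRUPT_0 (as
 * tg3_enable_ints() and tg3_restart_ints() do) tells the chip how much work
 * has been acknowledged; only the non-tagged mode needs the explicit
 * HOSTCC_MODE_NOW kick above.
 */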

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
631#define PHY_BUSY_LOOPS 5000
632
633static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
634{
635 u32 frame_val;
636 unsigned int loops;
637 int ret;
638
639 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
640 tw32_f(MAC_MI_MODE,
641 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
642 udelay(80);
643 }
644
645 *val = 0x0;
646
647 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
648 MI_COM_PHY_ADDR_MASK);
649 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
650 MI_COM_REG_ADDR_MASK);
651 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
652
653 tw32_f(MAC_MI_COM, frame_val);
654
655 loops = PHY_BUSY_LOOPS;
656 while (loops != 0) {
657 udelay(10);
658 frame_val = tr32(MAC_MI_COM);
659
660 if ((frame_val & MI_COM_BUSY) == 0) {
661 udelay(5);
662 frame_val = tr32(MAC_MI_COM);
663 break;
664 }
665 loops -= 1;
666 }
667
668 ret = -EBUSY;
669 if (loops != 0) {
670 *val = frame_val & MI_COM_DATA_MASK;
671 ret = 0;
672 }
673
674 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
675 tw32_f(MAC_MI_MODE, tp->mi_mode);
676 udelay(80);
677 }
678
679 return ret;
680}
681
682static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
683{
684 u32 frame_val;
685 unsigned int loops;
686 int ret;
687
688 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
689 tw32_f(MAC_MI_MODE,
690 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
691 udelay(80);
692 }
693
694 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
695 MI_COM_PHY_ADDR_MASK);
696 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
697 MI_COM_REG_ADDR_MASK);
698 frame_val |= (val & MI_COM_DATA_MASK);
699 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
700
701 tw32_f(MAC_MI_COM, frame_val);
702
703 loops = PHY_BUSY_LOOPS;
704 while (loops != 0) {
705 udelay(10);
706 frame_val = tr32(MAC_MI_COM);
707 if ((frame_val & MI_COM_BUSY) == 0) {
708 udelay(5);
709 frame_val = tr32(MAC_MI_COM);
710 break;
711 }
712 loops -= 1;
713 }
714
715 ret = -EBUSY;
716 if (loops != 0)
717 ret = 0;
718
719 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
720 tw32_f(MAC_MI_MODE, tp->mi_mode);
721 udelay(80);
722 }
723
724 return ret;
725}
726
727static void tg3_phy_set_wirespeed(struct tg3 *tp)
728{
729 u32 val;
730
731 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
732 return;
733
734 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
735 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
736 tg3_writephy(tp, MII_TG3_AUX_CTRL,
737 (val | (1 << 15) | (1 << 4)));
738}
739
740static int tg3_bmcr_reset(struct tg3 *tp)
741{
742 u32 phy_control;
743 int limit, err;
744
745 /* OK, reset it, and poll the BMCR_RESET bit until it
746 * clears or we time out.
747 */
748 phy_control = BMCR_RESET;
749 err = tg3_writephy(tp, MII_BMCR, phy_control);
750 if (err != 0)
751 return -EBUSY;
752
753 limit = 5000;
754 while (limit--) {
755 err = tg3_readphy(tp, MII_BMCR, &phy_control);
756 if (err != 0)
757 return -EBUSY;
758
759 if ((phy_control & BMCR_RESET) == 0) {
760 udelay(40);
761 break;
762 }
763 udelay(10);
764 }
765 if (limit <= 0)
766 return -EBUSY;
767
768 return 0;
769}
770
771static int tg3_wait_macro_done(struct tg3 *tp)
772{
773 int limit = 100;
774
775 while (limit--) {
776 u32 tmp32;
777
778 if (!tg3_readphy(tp, 0x16, &tmp32)) {
779 if ((tmp32 & 0x1000) == 0)
780 break;
781 }
782 }
783 if (limit <= 0)
784 return -EBUSY;
785
786 return 0;
787}
788
789static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
790{
791 static const u32 test_pat[4][6] = {
792 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
793 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
794 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
795 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
796 };
797 int chan;
798
799 for (chan = 0; chan < 4; chan++) {
800 int i;
801
802 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
803 (chan * 0x2000) | 0x0200);
804 tg3_writephy(tp, 0x16, 0x0002);
805
806 for (i = 0; i < 6; i++)
807 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
808 test_pat[chan][i]);
809
810 tg3_writephy(tp, 0x16, 0x0202);
811 if (tg3_wait_macro_done(tp)) {
812 *resetp = 1;
813 return -EBUSY;
814 }
815
816 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
817 (chan * 0x2000) | 0x0200);
818 tg3_writephy(tp, 0x16, 0x0082);
819 if (tg3_wait_macro_done(tp)) {
820 *resetp = 1;
821 return -EBUSY;
822 }
823
824 tg3_writephy(tp, 0x16, 0x0802);
825 if (tg3_wait_macro_done(tp)) {
826 *resetp = 1;
827 return -EBUSY;
828 }
829
830 for (i = 0; i < 6; i += 2) {
831 u32 low, high;
832
833 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
834 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
835 tg3_wait_macro_done(tp)) {
836 *resetp = 1;
837 return -EBUSY;
838 }
839 low &= 0x7fff;
840 high &= 0x000f;
841 if (low != test_pat[chan][i] ||
842 high != test_pat[chan][i+1]) {
843 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
844 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
846
847 return -EBUSY;
848 }
849 }
850 }
851
852 return 0;
853}
854
855static int tg3_phy_reset_chanpat(struct tg3 *tp)
856{
857 int chan;
858
859 for (chan = 0; chan < 4; chan++) {
860 int i;
861
862 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
863 (chan * 0x2000) | 0x0200);
864 tg3_writephy(tp, 0x16, 0x0002);
865 for (i = 0; i < 6; i++)
866 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
867 tg3_writephy(tp, 0x16, 0x0202);
868 if (tg3_wait_macro_done(tp))
869 return -EBUSY;
870 }
871
872 return 0;
873}
874
875static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
876{
877 u32 reg32, phy9_orig;
878 int retries, do_phy_reset, err;
879
880 retries = 10;
881 do_phy_reset = 1;
882 do {
883 if (do_phy_reset) {
884 err = tg3_bmcr_reset(tp);
885 if (err)
886 return err;
887 do_phy_reset = 0;
888 }
889
890 /* Disable transmitter and interrupt. */
891 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
892 continue;
893
894 reg32 |= 0x3000;
895 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
896
897 /* Set full-duplex, 1000 mbps. */
898 tg3_writephy(tp, MII_BMCR,
899 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
900
901 /* Set to master mode. */
902 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
903 continue;
904
905 tg3_writephy(tp, MII_TG3_CTRL,
906 (MII_TG3_CTRL_AS_MASTER |
907 MII_TG3_CTRL_ENABLE_AS_MASTER));
908
909 /* Enable SM_DSP_CLOCK and 6dB. */
910 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
911
912 /* Block the PHY control access. */
913 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
914 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
915
916 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
917 if (!err)
918 break;
919 } while (--retries);
920
921 err = tg3_phy_reset_chanpat(tp);
922 if (err)
923 return err;
924
925 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
927
928 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
929 tg3_writephy(tp, 0x16, 0x0000);
930
931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
933 /* Set Extended packet length bit for jumbo frames */
934 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
935 }
936 else {
937 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
938 }
939
940 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
941
942 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
943 reg32 &= ~0x3000;
944 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
945 } else if (!err)
946 err = -EBUSY;
947
948 return err;
949}
950
951/* This will reset the tigon3 PHY if there is no valid
952 * link unless the FORCE argument is non-zero.
953 */
954static int tg3_phy_reset(struct tg3 *tp)
955{
956 u32 phy_status;
957 int err;
958
959 err = tg3_readphy(tp, MII_BMSR, &phy_status);
960 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
961 if (err != 0)
962 return -EBUSY;
963
964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
967 err = tg3_phy_reset_5703_4_5(tp);
968 if (err)
969 return err;
970 goto out;
971 }
972
973 err = tg3_bmcr_reset(tp);
974 if (err)
975 return err;
976
977out:
978 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
979 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
980 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
981 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
982 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
983 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
984 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
985 }
986 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
987 tg3_writephy(tp, 0x1c, 0x8d68);
988 tg3_writephy(tp, 0x1c, 0x8d68);
989 }
990 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
991 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
993 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
994 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
995 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
996 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
997 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
998 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
999 }
1000 /* Set Extended packet length bit (bit 14) on all chips that */
1001 /* support jumbo frames */
1002 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1003 /* Cannot do read-modify-write on 5401 */
1004 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001005 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 u32 phy_reg;
1007
1008 /* Set bit 14 with read-modify-write to preserve other bits */
1009 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1010 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1011 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1012 }
1013
1014 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1015 * jumbo frames transmission.
1016 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001017 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 u32 phy_reg;
1019
1020 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1021 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1022 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1023 }
1024
1025 tg3_phy_set_wirespeed(tp);
1026 return 0;
1027}
1028
1029static void tg3_frob_aux_power(struct tg3 *tp)
1030{
1031 struct tg3 *tp_peer = tp;
1032
1033 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1034 return;
1035
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001036 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1037 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1038 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001040 dev_peer = pci_get_drvdata(tp->pdev_peer);
1041 if (!dev_peer)
1042 BUG();
1043 tp_peer = netdev_priv(dev_peer);
1044 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045
1046 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001047 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1048 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1049 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001052 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1053 (GRC_LCLCTRL_GPIO_OE0 |
1054 GRC_LCLCTRL_GPIO_OE1 |
1055 GRC_LCLCTRL_GPIO_OE2 |
1056 GRC_LCLCTRL_GPIO_OUTPUT0 |
1057 GRC_LCLCTRL_GPIO_OUTPUT1),
1058 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 } else {
1060 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001061 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062
1063 if (tp_peer != tp &&
1064 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1065 return;
1066
Michael Chandc56b7d2005-12-19 16:26:28 -08001067 /* Workaround to prevent overdrawing Amps. */
1068 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1069 ASIC_REV_5714) {
1070 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001071 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001073 }
1074
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 /* On 5753 and variants, GPIO2 cannot be used. */
1076 no_gpio2 = tp->nic_sram_data_cfg &
1077 NIC_SRAM_DATA_CFG_NO_GPIO2;
1078
Michael Chandc56b7d2005-12-19 16:26:28 -08001079 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 GRC_LCLCTRL_GPIO_OE1 |
1081 GRC_LCLCTRL_GPIO_OE2 |
1082 GRC_LCLCTRL_GPIO_OUTPUT1 |
1083 GRC_LCLCTRL_GPIO_OUTPUT2;
1084 if (no_gpio2) {
1085 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1086 GRC_LCLCTRL_GPIO_OUTPUT2);
1087 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001088 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090
1091 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1092
Michael Chanb401e9e2005-12-19 16:27:04 -08001093 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1094 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095
1096 if (!no_gpio2) {
1097 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001098 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1099 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 }
1101 }
1102 } else {
1103 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1104 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1105 if (tp_peer != tp &&
1106 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107 return;
1108
Michael Chanb401e9e2005-12-19 16:27:04 -08001109 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1110 (GRC_LCLCTRL_GPIO_OE1 |
1111 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112
Michael Chanb401e9e2005-12-19 16:27:04 -08001113 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
Michael Chanb401e9e2005-12-19 16:27:04 -08001116 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117 (GRC_LCLCTRL_GPIO_OE1 |
1118 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 }
1120 }
1121}
1122
1123static int tg3_setup_phy(struct tg3 *, int);
1124
1125#define RESET_KIND_SHUTDOWN 0
1126#define RESET_KIND_INIT 1
1127#define RESET_KIND_SUSPEND 2
1128
1129static void tg3_write_sig_post_reset(struct tg3 *, int);
1130static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001131static int tg3_nvram_lock(struct tg3 *);
1132static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133
1134static int tg3_set_power_state(struct tg3 *tp, int state)
1135{
1136 u32 misc_host_ctrl;
1137 u16 power_control, power_caps;
1138 int pm = tp->pm_cap;
1139
1140 /* Make sure register accesses (indirect or otherwise)
1141 * will function correctly.
1142 */
1143 pci_write_config_dword(tp->pdev,
1144 TG3PCI_MISC_HOST_CTRL,
1145 tp->misc_host_ctrl);
1146
1147 pci_read_config_word(tp->pdev,
1148 pm + PCI_PM_CTRL,
1149 &power_control);
1150 power_control |= PCI_PM_CTRL_PME_STATUS;
1151 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1152 switch (state) {
1153 case 0:
1154 power_control |= 0;
1155 pci_write_config_word(tp->pdev,
1156 pm + PCI_PM_CTRL,
1157 power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001158 udelay(100); /* Delay after power state change */
1159
1160 /* Switch out of Vaux if it is not a LOM */
Michael Chanb401e9e2005-12-19 16:27:04 -08001161 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1162 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
1164 return 0;
1165
1166 case 1:
1167 power_control |= 1;
1168 break;
1169
1170 case 2:
1171 power_control |= 2;
1172 break;
1173
1174 case 3:
1175 power_control |= 3;
1176 break;
1177
1178 default:
1179 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1180 "requested.\n",
1181 tp->dev->name, state);
1182 return -EINVAL;
1183 };
1184
1185 power_control |= PCI_PM_CTRL_PME_ENABLE;
1186
1187 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1188 tw32(TG3PCI_MISC_HOST_CTRL,
1189 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1190
1191 if (tp->link_config.phy_is_low_power == 0) {
1192 tp->link_config.phy_is_low_power = 1;
1193 tp->link_config.orig_speed = tp->link_config.speed;
1194 tp->link_config.orig_duplex = tp->link_config.duplex;
1195 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1196 }
1197
Michael Chan747e8f82005-07-25 12:33:22 -07001198 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 tp->link_config.speed = SPEED_10;
1200 tp->link_config.duplex = DUPLEX_HALF;
1201 tp->link_config.autoneg = AUTONEG_ENABLE;
1202 tg3_setup_phy(tp, 0);
1203 }
1204
Michael Chan6921d202005-12-13 21:15:53 -08001205 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1206 int i;
1207 u32 val;
1208
1209 for (i = 0; i < 200; i++) {
1210 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1211 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1212 break;
1213 msleep(1);
1214 }
1215 }
1216 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1217 WOL_DRV_STATE_SHUTDOWN |
1218 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1219
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1221
1222 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1223 u32 mac_mode;
1224
1225 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1226 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1227 udelay(40);
1228
1229 mac_mode = MAC_MODE_PORT_MODE_MII;
1230
1231 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1232 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1233 mac_mode |= MAC_MODE_LINK_POLARITY;
1234 } else {
1235 mac_mode = MAC_MODE_PORT_MODE_TBI;
1236 }
1237
John W. Linvillecbf46852005-04-21 17:01:29 -07001238 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 tw32(MAC_LED_CTRL, tp->led_ctrl);
1240
1241 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1242 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1243 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1244
1245 tw32_f(MAC_MODE, mac_mode);
1246 udelay(100);
1247
1248 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1249 udelay(10);
1250 }
1251
1252 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1253 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1255 u32 base_val;
1256
1257 base_val = tp->pci_clock_ctrl;
1258 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1259 CLOCK_CTRL_TXCLK_DISABLE);
1260
Michael Chanb401e9e2005-12-19 16:27:04 -08001261 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1262 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chana4e2b342005-10-26 15:46:52 -07001263 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chan4cf78e42005-07-25 12:29:19 -07001264 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07001265 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1267 u32 newbits1, newbits2;
1268
1269 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1271 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1272 CLOCK_CTRL_TXCLK_DISABLE |
1273 CLOCK_CTRL_ALTCLK);
1274 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1275 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1276 newbits1 = CLOCK_CTRL_625_CORE;
1277 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1278 } else {
1279 newbits1 = CLOCK_CTRL_ALTCLK;
1280 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281 }
1282
Michael Chanb401e9e2005-12-19 16:27:04 -08001283 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1284 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
Michael Chanb401e9e2005-12-19 16:27:04 -08001286 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1287 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
1289 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1290 u32 newbits3;
1291
1292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1294 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1295 CLOCK_CTRL_TXCLK_DISABLE |
1296 CLOCK_CTRL_44MHZ_CORE);
1297 } else {
1298 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1299 }
1300
Michael Chanb401e9e2005-12-19 16:27:04 -08001301 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1302 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 }
1304 }
1305
Michael Chan6921d202005-12-13 21:15:53 -08001306 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1307 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1308 /* Turn off the PHY */
1309 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1310 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1311 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1312 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
Michael Chandc56b7d2005-12-19 16:26:28 -08001313 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1314 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
Michael Chan6921d202005-12-13 21:15:53 -08001315 }
1316 }
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 tg3_frob_aux_power(tp);
1319
1320 /* Workaround for unstable PLL clock */
1321 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1322 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1323 u32 val = tr32(0x7d00);
1324
1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08001327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08001328 int err;
1329
1330 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08001332 if (!err)
1333 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08001334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 }
1336
1337 /* Finally, set the new power state. */
1338 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001339 udelay(100); /* Delay after power state change */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1342
1343 return 0;
1344}
1345
1346static void tg3_link_report(struct tg3 *tp)
1347{
1348 if (!netif_carrier_ok(tp->dev)) {
1349 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1350 } else {
1351 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1352 tp->dev->name,
1353 (tp->link_config.active_speed == SPEED_1000 ?
1354 1000 :
1355 (tp->link_config.active_speed == SPEED_100 ?
1356 100 : 10)),
1357 (tp->link_config.active_duplex == DUPLEX_FULL ?
1358 "full" : "half"));
1359
1360 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1361 "%s for RX.\n",
1362 tp->dev->name,
1363 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1364 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1365 }
1366}
1367
1368static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1369{
1370 u32 new_tg3_flags = 0;
1371 u32 old_rx_mode = tp->rx_mode;
1372 u32 old_tx_mode = tp->tx_mode;
1373
1374 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
Michael Chan747e8f82005-07-25 12:33:22 -07001375
1376 /* Convert 1000BaseX flow control bits to 1000BaseT
1377 * bits before resolving flow control.
1378 */
1379 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1380 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1381 ADVERTISE_PAUSE_ASYM);
1382 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1383
1384 if (local_adv & ADVERTISE_1000XPAUSE)
1385 local_adv |= ADVERTISE_PAUSE_CAP;
1386 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1387 local_adv |= ADVERTISE_PAUSE_ASYM;
1388 if (remote_adv & LPA_1000XPAUSE)
1389 remote_adv |= LPA_PAUSE_CAP;
1390 if (remote_adv & LPA_1000XPAUSE_ASYM)
1391 remote_adv |= LPA_PAUSE_ASYM;
1392 }
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 if (local_adv & ADVERTISE_PAUSE_CAP) {
1395 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1396 if (remote_adv & LPA_PAUSE_CAP)
1397 new_tg3_flags |=
1398 (TG3_FLAG_RX_PAUSE |
1399 TG3_FLAG_TX_PAUSE);
1400 else if (remote_adv & LPA_PAUSE_ASYM)
1401 new_tg3_flags |=
1402 (TG3_FLAG_RX_PAUSE);
1403 } else {
1404 if (remote_adv & LPA_PAUSE_CAP)
1405 new_tg3_flags |=
1406 (TG3_FLAG_RX_PAUSE |
1407 TG3_FLAG_TX_PAUSE);
1408 }
1409 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410 if ((remote_adv & LPA_PAUSE_CAP) &&
1411 (remote_adv & LPA_PAUSE_ASYM))
1412 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1413 }
1414
1415 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1416 tp->tg3_flags |= new_tg3_flags;
1417 } else {
1418 new_tg3_flags = tp->tg3_flags;
1419 }
1420
1421 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1422 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1423 else
1424 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1425
1426 if (old_rx_mode != tp->rx_mode) {
1427 tw32_f(MAC_RX_MODE, tp->rx_mode);
1428 }
1429
1430 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1431 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1432 else
1433 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1434
1435 if (old_tx_mode != tp->tx_mode) {
1436 tw32_f(MAC_TX_MODE, tp->tx_mode);
1437 }
1438}
1439
1440static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1441{
1442 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1443 case MII_TG3_AUX_STAT_10HALF:
1444 *speed = SPEED_10;
1445 *duplex = DUPLEX_HALF;
1446 break;
1447
1448 case MII_TG3_AUX_STAT_10FULL:
1449 *speed = SPEED_10;
1450 *duplex = DUPLEX_FULL;
1451 break;
1452
1453 case MII_TG3_AUX_STAT_100HALF:
1454 *speed = SPEED_100;
1455 *duplex = DUPLEX_HALF;
1456 break;
1457
1458 case MII_TG3_AUX_STAT_100FULL:
1459 *speed = SPEED_100;
1460 *duplex = DUPLEX_FULL;
1461 break;
1462
1463 case MII_TG3_AUX_STAT_1000HALF:
1464 *speed = SPEED_1000;
1465 *duplex = DUPLEX_HALF;
1466 break;
1467
1468 case MII_TG3_AUX_STAT_1000FULL:
1469 *speed = SPEED_1000;
1470 *duplex = DUPLEX_FULL;
1471 break;
1472
1473 default:
1474 *speed = SPEED_INVALID;
1475 *duplex = DUPLEX_INVALID;
1476 break;
1477 };
1478}
1479
1480static void tg3_phy_copper_begin(struct tg3 *tp)
1481{
1482 u32 new_adv;
1483 int i;
1484
1485 if (tp->link_config.phy_is_low_power) {
1486 /* Entering low power mode. Disable gigabit and
1487 * 100baseT advertisements.
1488 */
1489 tg3_writephy(tp, MII_TG3_CTRL, 0);
1490
1491 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1492 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1493 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1494 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1495
1496 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1497 } else if (tp->link_config.speed == SPEED_INVALID) {
1498 tp->link_config.advertising =
1499 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1500 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1501 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1502 ADVERTISED_Autoneg | ADVERTISED_MII);
1503
1504 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1505 tp->link_config.advertising &=
1506 ~(ADVERTISED_1000baseT_Half |
1507 ADVERTISED_1000baseT_Full);
1508
1509 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1510 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1511 new_adv |= ADVERTISE_10HALF;
1512 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1513 new_adv |= ADVERTISE_10FULL;
1514 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1515 new_adv |= ADVERTISE_100HALF;
1516 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1517 new_adv |= ADVERTISE_100FULL;
1518 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1519
1520 if (tp->link_config.advertising &
1521 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1522 new_adv = 0;
1523 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1524 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1525 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1526 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1527 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1528 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1529 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1530 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1531 MII_TG3_CTRL_ENABLE_AS_MASTER);
1532 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1533 } else {
1534 tg3_writephy(tp, MII_TG3_CTRL, 0);
1535 }
1536 } else {
1537 /* Asking for a specific link mode. */
1538 if (tp->link_config.speed == SPEED_1000) {
1539 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1540 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1541
1542 if (tp->link_config.duplex == DUPLEX_FULL)
1543 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1544 else
1545 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1546 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1547 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1548 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1549 MII_TG3_CTRL_ENABLE_AS_MASTER);
1550 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1551 } else {
1552 tg3_writephy(tp, MII_TG3_CTRL, 0);
1553
1554 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1555 if (tp->link_config.speed == SPEED_100) {
1556 if (tp->link_config.duplex == DUPLEX_FULL)
1557 new_adv |= ADVERTISE_100FULL;
1558 else
1559 new_adv |= ADVERTISE_100HALF;
1560 } else {
1561 if (tp->link_config.duplex == DUPLEX_FULL)
1562 new_adv |= ADVERTISE_10FULL;
1563 else
1564 new_adv |= ADVERTISE_10HALF;
1565 }
1566 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1567 }
1568 }
1569
1570 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1571 tp->link_config.speed != SPEED_INVALID) {
1572 u32 bmcr, orig_bmcr;
1573
1574 tp->link_config.active_speed = tp->link_config.speed;
1575 tp->link_config.active_duplex = tp->link_config.duplex;
1576
1577 bmcr = 0;
1578 switch (tp->link_config.speed) {
1579 default:
1580 case SPEED_10:
1581 break;
1582
1583 case SPEED_100:
1584 bmcr |= BMCR_SPEED100;
1585 break;
1586
1587 case SPEED_1000:
1588 bmcr |= TG3_BMCR_SPEED1000;
1589 break;
1590 };
1591
1592 if (tp->link_config.duplex == DUPLEX_FULL)
1593 bmcr |= BMCR_FULLDPLX;
1594
1595 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1596 (bmcr != orig_bmcr)) {
1597 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1598 for (i = 0; i < 1500; i++) {
1599 u32 tmp;
1600
1601 udelay(10);
1602 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1603 tg3_readphy(tp, MII_BMSR, &tmp))
1604 continue;
1605 if (!(tmp & BMSR_LSTATUS)) {
1606 udelay(40);
1607 break;
1608 }
1609 }
1610 tg3_writephy(tp, MII_BMCR, bmcr);
1611 udelay(40);
1612 }
1613 } else {
1614 tg3_writephy(tp, MII_BMCR,
1615 BMCR_ANENABLE | BMCR_ANRESTART);
1616 }
1617}
1618
1619static int tg3_init_5401phy_dsp(struct tg3 *tp)
1620{
1621 int err;
1622
1623 /* Turn off tap power management. */
1624 /* Set Extended packet length bit */
1625 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1626
1627 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1628 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1629
1630 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1631 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1632
1633 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1634 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1635
1636 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1637 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1638
1639 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1640 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1641
1642 udelay(40);
1643
1644 return err;
1645}
1646
1647static int tg3_copper_is_advertising_all(struct tg3 *tp)
1648{
1649 u32 adv_reg, all_mask;
1650
1651 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1652 return 0;
1653
1654 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1655 ADVERTISE_100HALF | ADVERTISE_100FULL);
1656 if ((adv_reg & all_mask) != all_mask)
1657 return 0;
1658 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1659 u32 tg3_ctrl;
1660
1661 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1662 return 0;
1663
1664 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1665 MII_TG3_CTRL_ADV_1000_FULL);
1666 if ((tg3_ctrl & all_mask) != all_mask)
1667 return 0;
1668 }
1669 return 1;
1670}
1671
1672static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1673{
1674 int current_link_up;
1675 u32 bmsr, dummy;
1676 u16 current_speed;
1677 u8 current_duplex;
1678 int i, err;
1679
1680 tw32(MAC_EVENT, 0);
1681
1682 tw32_f(MAC_STATUS,
1683 (MAC_STATUS_SYNC_CHANGED |
1684 MAC_STATUS_CFG_CHANGED |
1685 MAC_STATUS_MI_COMPLETION |
1686 MAC_STATUS_LNKSTATE_CHANGED));
1687 udelay(40);
1688
1689 tp->mi_mode = MAC_MI_MODE_BASE;
1690 tw32_f(MAC_MI_MODE, tp->mi_mode);
1691 udelay(80);
1692
1693 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1694
1695 /* Some third-party PHYs need to be reset on link going
1696 * down.
1697 */
1698 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1701 netif_carrier_ok(tp->dev)) {
1702 tg3_readphy(tp, MII_BMSR, &bmsr);
1703 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1704 !(bmsr & BMSR_LSTATUS))
1705 force_reset = 1;
1706 }
1707 if (force_reset)
1708 tg3_phy_reset(tp);
1709
1710 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1711 tg3_readphy(tp, MII_BMSR, &bmsr);
1712 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1713 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1714 bmsr = 0;
1715
1716 if (!(bmsr & BMSR_LSTATUS)) {
1717 err = tg3_init_5401phy_dsp(tp);
1718 if (err)
1719 return err;
1720
1721 tg3_readphy(tp, MII_BMSR, &bmsr);
1722 for (i = 0; i < 1000; i++) {
1723 udelay(10);
1724 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1725 (bmsr & BMSR_LSTATUS)) {
1726 udelay(40);
1727 break;
1728 }
1729 }
1730
1731 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1732 !(bmsr & BMSR_LSTATUS) &&
1733 tp->link_config.active_speed == SPEED_1000) {
1734 err = tg3_phy_reset(tp);
1735 if (!err)
1736 err = tg3_init_5401phy_dsp(tp);
1737 if (err)
1738 return err;
1739 }
1740 }
1741 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1742 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1743 /* 5701 {A0,B0} CRC bug workaround */
1744 tg3_writephy(tp, 0x15, 0x0a75);
1745 tg3_writephy(tp, 0x1c, 0x8c68);
1746 tg3_writephy(tp, 0x1c, 0x8d68);
1747 tg3_writephy(tp, 0x1c, 0x8c68);
1748 }
1749
1750 /* Clear pending interrupts... */
1751 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1752 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1753
1754 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1755 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1756 else
1757 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1758
1759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1761 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1762 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1763 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1764 else
1765 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1766 }
1767
1768 current_link_up = 0;
1769 current_speed = SPEED_INVALID;
1770 current_duplex = DUPLEX_INVALID;
1771
1772 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1773 u32 val;
1774
1775 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1776 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1777 if (!(val & (1 << 10))) {
1778 val |= (1 << 10);
1779 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1780 goto relink;
1781 }
1782 }
1783
1784 bmsr = 0;
1785 for (i = 0; i < 100; i++) {
1786 tg3_readphy(tp, MII_BMSR, &bmsr);
1787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1788 (bmsr & BMSR_LSTATUS))
1789 break;
1790 udelay(40);
1791 }
1792
1793 if (bmsr & BMSR_LSTATUS) {
1794 u32 aux_stat, bmcr;
1795
1796 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1797 for (i = 0; i < 2000; i++) {
1798 udelay(10);
1799 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1800 aux_stat)
1801 break;
1802 }
1803
1804 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1805 &current_speed,
1806 &current_duplex);
1807
1808 bmcr = 0;
1809 for (i = 0; i < 200; i++) {
1810 tg3_readphy(tp, MII_BMCR, &bmcr);
1811 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1812 continue;
1813 if (bmcr && bmcr != 0x7fff)
1814 break;
1815 udelay(10);
1816 }
1817
1818 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1819 if (bmcr & BMCR_ANENABLE) {
1820 current_link_up = 1;
1821
1822 /* Force autoneg restart if we are exiting
1823 * low power mode.
1824 */
1825 if (!tg3_copper_is_advertising_all(tp))
1826 current_link_up = 0;
1827 } else {
1828 current_link_up = 0;
1829 }
1830 } else {
1831 if (!(bmcr & BMCR_ANENABLE) &&
1832 tp->link_config.speed == current_speed &&
1833 tp->link_config.duplex == current_duplex) {
1834 current_link_up = 1;
1835 } else {
1836 current_link_up = 0;
1837 }
1838 }
1839
1840 tp->link_config.active_speed = current_speed;
1841 tp->link_config.active_duplex = current_duplex;
1842 }
1843
1844 if (current_link_up == 1 &&
1845 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1846 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1847 u32 local_adv, remote_adv;
1848
1849 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1850 local_adv = 0;
1851 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1852
1853 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1854 remote_adv = 0;
1855
1856 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1857
1858 /* If we are not advertising full pause capability,
1859 * something is wrong. Bring the link down and reconfigure.
1860 */
1861 if (local_adv != ADVERTISE_PAUSE_CAP) {
1862 current_link_up = 0;
1863 } else {
1864 tg3_setup_flow_control(tp, local_adv, remote_adv);
1865 }
1866 }
1867relink:
Michael Chan6921d202005-12-13 21:15:53 -08001868 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 u32 tmp;
1870
1871 tg3_phy_copper_begin(tp);
1872
1873 tg3_readphy(tp, MII_BMSR, &tmp);
1874 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1875 (tmp & BMSR_LSTATUS))
1876 current_link_up = 1;
1877 }
1878
1879 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1880 if (current_link_up == 1) {
1881 if (tp->link_config.active_speed == SPEED_100 ||
1882 tp->link_config.active_speed == SPEED_10)
1883 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1884 else
1885 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1886 } else
1887 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1888
1889 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1890 if (tp->link_config.active_duplex == DUPLEX_HALF)
1891 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1892
1893 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1895 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1896 (current_link_up == 1 &&
1897 tp->link_config.active_speed == SPEED_10))
1898 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1899 } else {
1900 if (current_link_up == 1)
1901 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1902 }
1903
1904 /* ??? Without this setting Netgear GA302T PHY does not
1905 * ??? send/receive packets...
1906 */
1907 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1908 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1909 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1910 tw32_f(MAC_MI_MODE, tp->mi_mode);
1911 udelay(80);
1912 }
1913
1914 tw32_f(MAC_MODE, tp->mac_mode);
1915 udelay(40);
1916
1917 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1918 /* Polled via timer. */
1919 tw32_f(MAC_EVENT, 0);
1920 } else {
1921 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1922 }
1923 udelay(40);
1924
1925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1926 current_link_up == 1 &&
1927 tp->link_config.active_speed == SPEED_1000 &&
1928 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1929 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1930 udelay(120);
1931 tw32_f(MAC_STATUS,
1932 (MAC_STATUS_SYNC_CHANGED |
1933 MAC_STATUS_CFG_CHANGED));
1934 udelay(40);
1935 tg3_write_mem(tp,
1936 NIC_SRAM_FIRMWARE_MBOX,
1937 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1938 }
1939
1940 if (current_link_up != netif_carrier_ok(tp->dev)) {
1941 if (current_link_up)
1942 netif_carrier_on(tp->dev);
1943 else
1944 netif_carrier_off(tp->dev);
1945 tg3_link_report(tp);
1946 }
1947
1948 return 0;
1949}
1950
1951struct tg3_fiber_aneginfo {
1952 int state;
1953#define ANEG_STATE_UNKNOWN 0
1954#define ANEG_STATE_AN_ENABLE 1
1955#define ANEG_STATE_RESTART_INIT 2
1956#define ANEG_STATE_RESTART 3
1957#define ANEG_STATE_DISABLE_LINK_OK 4
1958#define ANEG_STATE_ABILITY_DETECT_INIT 5
1959#define ANEG_STATE_ABILITY_DETECT 6
1960#define ANEG_STATE_ACK_DETECT_INIT 7
1961#define ANEG_STATE_ACK_DETECT 8
1962#define ANEG_STATE_COMPLETE_ACK_INIT 9
1963#define ANEG_STATE_COMPLETE_ACK 10
1964#define ANEG_STATE_IDLE_DETECT_INIT 11
1965#define ANEG_STATE_IDLE_DETECT 12
1966#define ANEG_STATE_LINK_OK 13
1967#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1968#define ANEG_STATE_NEXT_PAGE_WAIT 15
1969
1970 u32 flags;
1971#define MR_AN_ENABLE 0x00000001
1972#define MR_RESTART_AN 0x00000002
1973#define MR_AN_COMPLETE 0x00000004
1974#define MR_PAGE_RX 0x00000008
1975#define MR_NP_LOADED 0x00000010
1976#define MR_TOGGLE_TX 0x00000020
1977#define MR_LP_ADV_FULL_DUPLEX 0x00000040
1978#define MR_LP_ADV_HALF_DUPLEX 0x00000080
1979#define MR_LP_ADV_SYM_PAUSE 0x00000100
1980#define MR_LP_ADV_ASYM_PAUSE 0x00000200
1981#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1982#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1983#define MR_LP_ADV_NEXT_PAGE 0x00001000
1984#define MR_TOGGLE_RX 0x00002000
1985#define MR_NP_RX 0x00004000
1986
1987#define MR_LINK_OK 0x80000000
1988
1989 unsigned long link_time, cur_time;
1990
1991 u32 ability_match_cfg;
1992 int ability_match_count;
1993
1994 char ability_match, idle_match, ack_match;
1995
1996 u32 txconfig, rxconfig;
1997#define ANEG_CFG_NP 0x00000080
1998#define ANEG_CFG_ACK 0x00000040
1999#define ANEG_CFG_RF2 0x00000020
2000#define ANEG_CFG_RF1 0x00000010
2001#define ANEG_CFG_PS2 0x00000001
2002#define ANEG_CFG_PS1 0x00008000
2003#define ANEG_CFG_HD 0x00004000
2004#define ANEG_CFG_FD 0x00002000
2005#define ANEG_CFG_INVAL 0x00001f06
2006
2007};
2008#define ANEG_OK 0
2009#define ANEG_DONE 1
2010#define ANEG_TIMER_ENAB 2
2011#define ANEG_FAILED -1
2012
2013#define ANEG_STATE_SETTLE_TIME 10000
2014
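/* Software autonegotiation state machine for the fiber interface.
 * The usual progression, driven by tg3_fiber_aneg_smachine() below, is
 * roughly:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * A mismatch in the received config words sends the machine back to
 * AN_ENABLE; an invalid config word fails the negotiation.
 */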
2015static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2016 struct tg3_fiber_aneginfo *ap)
2017{
2018 unsigned long delta;
2019 u32 rx_cfg_reg;
2020 int ret;
2021
2022 if (ap->state == ANEG_STATE_UNKNOWN) {
2023 ap->rxconfig = 0;
2024 ap->link_time = 0;
2025 ap->cur_time = 0;
2026 ap->ability_match_cfg = 0;
2027 ap->ability_match_count = 0;
2028 ap->ability_match = 0;
2029 ap->idle_match = 0;
2030 ap->ack_match = 0;
2031 }
2032 ap->cur_time++;
2033
2034 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2035 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2036
2037 if (rx_cfg_reg != ap->ability_match_cfg) {
2038 ap->ability_match_cfg = rx_cfg_reg;
2039 ap->ability_match = 0;
2040 ap->ability_match_count = 0;
2041 } else {
2042 if (++ap->ability_match_count > 1) {
2043 ap->ability_match = 1;
2044 ap->ability_match_cfg = rx_cfg_reg;
2045 }
2046 }
2047 if (rx_cfg_reg & ANEG_CFG_ACK)
2048 ap->ack_match = 1;
2049 else
2050 ap->ack_match = 0;
2051
2052 ap->idle_match = 0;
2053 } else {
2054 ap->idle_match = 1;
2055 ap->ability_match_cfg = 0;
2056 ap->ability_match_count = 0;
2057 ap->ability_match = 0;
2058 ap->ack_match = 0;
2059
2060 rx_cfg_reg = 0;
2061 }
2062
2063 ap->rxconfig = rx_cfg_reg;
2064 ret = ANEG_OK;
2065
2066 switch(ap->state) {
2067 case ANEG_STATE_UNKNOWN:
2068 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2069 ap->state = ANEG_STATE_AN_ENABLE;
2070
2071 /* fallthru */
2072 case ANEG_STATE_AN_ENABLE:
2073 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2074 if (ap->flags & MR_AN_ENABLE) {
2075 ap->link_time = 0;
2076 ap->cur_time = 0;
2077 ap->ability_match_cfg = 0;
2078 ap->ability_match_count = 0;
2079 ap->ability_match = 0;
2080 ap->idle_match = 0;
2081 ap->ack_match = 0;
2082
2083 ap->state = ANEG_STATE_RESTART_INIT;
2084 } else {
2085 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2086 }
2087 break;
2088
2089 case ANEG_STATE_RESTART_INIT:
2090 ap->link_time = ap->cur_time;
2091 ap->flags &= ~(MR_NP_LOADED);
2092 ap->txconfig = 0;
2093 tw32(MAC_TX_AUTO_NEG, 0);
2094 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2095 tw32_f(MAC_MODE, tp->mac_mode);
2096 udelay(40);
2097
2098 ret = ANEG_TIMER_ENAB;
2099 ap->state = ANEG_STATE_RESTART;
2100
2101 /* fallthru */
2102 case ANEG_STATE_RESTART:
2103 delta = ap->cur_time - ap->link_time;
2104 if (delta > ANEG_STATE_SETTLE_TIME) {
2105 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2106 } else {
2107 ret = ANEG_TIMER_ENAB;
2108 }
2109 break;
2110
2111 case ANEG_STATE_DISABLE_LINK_OK:
2112 ret = ANEG_DONE;
2113 break;
2114
2115 case ANEG_STATE_ABILITY_DETECT_INIT:
2116 ap->flags &= ~(MR_TOGGLE_TX);
2117 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2118 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2119 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2120 tw32_f(MAC_MODE, tp->mac_mode);
2121 udelay(40);
2122
2123 ap->state = ANEG_STATE_ABILITY_DETECT;
2124 break;
2125
2126 case ANEG_STATE_ABILITY_DETECT:
2127 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2128 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2129 }
2130 break;
2131
2132 case ANEG_STATE_ACK_DETECT_INIT:
2133 ap->txconfig |= ANEG_CFG_ACK;
2134 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2135 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2136 tw32_f(MAC_MODE, tp->mac_mode);
2137 udelay(40);
2138
2139 ap->state = ANEG_STATE_ACK_DETECT;
2140
2141 /* fallthru */
2142 case ANEG_STATE_ACK_DETECT:
2143 if (ap->ack_match != 0) {
2144 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2145 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2146 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2147 } else {
2148 ap->state = ANEG_STATE_AN_ENABLE;
2149 }
2150 } else if (ap->ability_match != 0 &&
2151 ap->rxconfig == 0) {
2152 ap->state = ANEG_STATE_AN_ENABLE;
2153 }
2154 break;
2155
2156 case ANEG_STATE_COMPLETE_ACK_INIT:
2157 if (ap->rxconfig & ANEG_CFG_INVAL) {
2158 ret = ANEG_FAILED;
2159 break;
2160 }
2161 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2162 MR_LP_ADV_HALF_DUPLEX |
2163 MR_LP_ADV_SYM_PAUSE |
2164 MR_LP_ADV_ASYM_PAUSE |
2165 MR_LP_ADV_REMOTE_FAULT1 |
2166 MR_LP_ADV_REMOTE_FAULT2 |
2167 MR_LP_ADV_NEXT_PAGE |
2168 MR_TOGGLE_RX |
2169 MR_NP_RX);
2170 if (ap->rxconfig & ANEG_CFG_FD)
2171 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2172 if (ap->rxconfig & ANEG_CFG_HD)
2173 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2174 if (ap->rxconfig & ANEG_CFG_PS1)
2175 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2176 if (ap->rxconfig & ANEG_CFG_PS2)
2177 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2178 if (ap->rxconfig & ANEG_CFG_RF1)
2179 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2180 if (ap->rxconfig & ANEG_CFG_RF2)
2181 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2182 if (ap->rxconfig & ANEG_CFG_NP)
2183 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2184
2185 ap->link_time = ap->cur_time;
2186
2187 ap->flags ^= (MR_TOGGLE_TX);
2188 if (ap->rxconfig & 0x0008)
2189 ap->flags |= MR_TOGGLE_RX;
2190 if (ap->rxconfig & ANEG_CFG_NP)
2191 ap->flags |= MR_NP_RX;
2192 ap->flags |= MR_PAGE_RX;
2193
2194 ap->state = ANEG_STATE_COMPLETE_ACK;
2195 ret = ANEG_TIMER_ENAB;
2196 break;
2197
2198 case ANEG_STATE_COMPLETE_ACK:
2199 if (ap->ability_match != 0 &&
2200 ap->rxconfig == 0) {
2201 ap->state = ANEG_STATE_AN_ENABLE;
2202 break;
2203 }
2204 delta = ap->cur_time - ap->link_time;
2205 if (delta > ANEG_STATE_SETTLE_TIME) {
2206 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2207 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2208 } else {
2209 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2210 !(ap->flags & MR_NP_RX)) {
2211 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2212 } else {
2213 ret = ANEG_FAILED;
2214 }
2215 }
2216 }
2217 break;
2218
2219 case ANEG_STATE_IDLE_DETECT_INIT:
2220 ap->link_time = ap->cur_time;
2221 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2222 tw32_f(MAC_MODE, tp->mac_mode);
2223 udelay(40);
2224
2225 ap->state = ANEG_STATE_IDLE_DETECT;
2226 ret = ANEG_TIMER_ENAB;
2227 break;
2228
2229 case ANEG_STATE_IDLE_DETECT:
2230 if (ap->ability_match != 0 &&
2231 ap->rxconfig == 0) {
2232 ap->state = ANEG_STATE_AN_ENABLE;
2233 break;
2234 }
2235 delta = ap->cur_time - ap->link_time;
2236 if (delta > ANEG_STATE_SETTLE_TIME) {
2237 /* XXX another gem from the Broadcom driver :( */
2238 ap->state = ANEG_STATE_LINK_OK;
2239 }
2240 break;
2241
2242 case ANEG_STATE_LINK_OK:
2243 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2244 ret = ANEG_DONE;
2245 break;
2246
2247 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2248 /* ??? unimplemented */
2249 break;
2250
2251 case ANEG_STATE_NEXT_PAGE_WAIT:
2252 /* ??? unimplemented */
2253 break;
2254
2255 default:
2256 ret = ANEG_FAILED;
2257 break;
2258 	}
2259
2260 return ret;
2261}
2262
2263static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2264{
2265 int res = 0;
2266 struct tg3_fiber_aneginfo aninfo;
2267 int status = ANEG_FAILED;
2268 unsigned int tick;
2269 u32 tmp;
2270
2271 tw32_f(MAC_TX_AUTO_NEG, 0);
2272
2273 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2274 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2275 udelay(40);
2276
2277 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2278 udelay(40);
2279
2280 memset(&aninfo, 0, sizeof(aninfo));
2281 aninfo.flags |= MR_AN_ENABLE;
2282 aninfo.state = ANEG_STATE_UNKNOWN;
2283 aninfo.cur_time = 0;
2284 tick = 0;
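	/* Poll the state machine roughly once per microsecond; the 195000
	 * iteration limit gives the software autoneg about 195 ms to finish.
	 */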
2285 while (++tick < 195000) {
2286 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2287 if (status == ANEG_DONE || status == ANEG_FAILED)
2288 break;
2289
2290 udelay(1);
2291 }
2292
2293 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2294 tw32_f(MAC_MODE, tp->mac_mode);
2295 udelay(40);
2296
2297 *flags = aninfo.flags;
2298
2299 if (status == ANEG_DONE &&
2300 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2301 MR_LP_ADV_FULL_DUPLEX)))
2302 res = 1;
2303
2304 return res;
2305}
2306
2307static void tg3_init_bcm8002(struct tg3 *tp)
2308{
2309 u32 mac_status = tr32(MAC_STATUS);
2310 int i;
2311
2312 	/* Reset when initializing for the first time or when we have a link. */
2313 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2314 !(mac_status & MAC_STATUS_PCS_SYNCED))
2315 return;
2316
2317 /* Set PLL lock range. */
2318 tg3_writephy(tp, 0x16, 0x8007);
2319
2320 /* SW reset */
2321 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2322
2323 /* Wait for reset to complete. */
2324 /* XXX schedule_timeout() ... */
2325 for (i = 0; i < 500; i++)
2326 udelay(10);
2327
2328 /* Config mode; select PMA/Ch 1 regs. */
2329 tg3_writephy(tp, 0x10, 0x8411);
2330
2331 /* Enable auto-lock and comdet, select txclk for tx. */
2332 tg3_writephy(tp, 0x11, 0x0a10);
2333
2334 tg3_writephy(tp, 0x18, 0x00a0);
2335 tg3_writephy(tp, 0x16, 0x41ff);
2336
2337 /* Assert and deassert POR. */
2338 tg3_writephy(tp, 0x13, 0x0400);
2339 udelay(40);
2340 tg3_writephy(tp, 0x13, 0x0000);
2341
2342 tg3_writephy(tp, 0x11, 0x0a50);
2343 udelay(40);
2344 tg3_writephy(tp, 0x11, 0x0a10);
2345
2346 /* Wait for signal to stabilize */
2347 /* XXX schedule_timeout() ... */
2348 for (i = 0; i < 15000; i++)
2349 udelay(10);
2350
2351 /* Deselect the channel register so we can read the PHYID
2352 * later.
2353 */
2354 tg3_writephy(tp, 0x10, 0x8011);
2355}
2356
2357static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2358{
2359 u32 sg_dig_ctrl, sg_dig_status;
2360 u32 serdes_cfg, expected_sg_dig_ctrl;
2361 int workaround, port_a;
2362 int current_link_up;
2363
2364 serdes_cfg = 0;
2365 expected_sg_dig_ctrl = 0;
2366 workaround = 0;
2367 port_a = 1;
2368 current_link_up = 0;
2369
2370 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2371 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2372 workaround = 1;
2373 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2374 port_a = 0;
2375
2376 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2377 /* preserve bits 20-23 for voltage regulator */
2378 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2379 }
2380
2381 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2382
2383 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2384 if (sg_dig_ctrl & (1 << 31)) {
2385 if (workaround) {
2386 u32 val = serdes_cfg;
2387
2388 if (port_a)
2389 val |= 0xc010000;
2390 else
2391 val |= 0x4010000;
2392 tw32_f(MAC_SERDES_CFG, val);
2393 }
2394 tw32_f(SG_DIG_CTRL, 0x01388400);
2395 }
2396 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2397 tg3_setup_flow_control(tp, 0, 0);
2398 current_link_up = 1;
2399 }
2400 goto out;
2401 }
2402
2403 /* Want auto-negotiation. */
2404 expected_sg_dig_ctrl = 0x81388400;
2405
2406 /* Pause capability */
2407 expected_sg_dig_ctrl |= (1 << 11);
2408
2409 	/* Asymmetric pause */
2410 expected_sg_dig_ctrl |= (1 << 12);
2411
2412 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2413 if (workaround)
2414 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2415 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2416 udelay(5);
2417 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2418
2419 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2420 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2421 MAC_STATUS_SIGNAL_DET)) {
2422 int i;
2423
2424 		/* Give time to negotiate (~200ms) */
2425 for (i = 0; i < 40000; i++) {
2426 sg_dig_status = tr32(SG_DIG_STATUS);
2427 if (sg_dig_status & (0x3))
2428 break;
2429 udelay(5);
2430 }
2431 mac_status = tr32(MAC_STATUS);
2432
2433 if ((sg_dig_status & (1 << 1)) &&
2434 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2435 u32 local_adv, remote_adv;
2436
2437 local_adv = ADVERTISE_PAUSE_CAP;
2438 remote_adv = 0;
2439 if (sg_dig_status & (1 << 19))
2440 remote_adv |= LPA_PAUSE_CAP;
2441 if (sg_dig_status & (1 << 20))
2442 remote_adv |= LPA_PAUSE_ASYM;
2443
2444 tg3_setup_flow_control(tp, local_adv, remote_adv);
2445 current_link_up = 1;
2446 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2447 } else if (!(sg_dig_status & (1 << 1))) {
2448 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2449 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2450 else {
2451 if (workaround) {
2452 u32 val = serdes_cfg;
2453
2454 if (port_a)
2455 val |= 0xc010000;
2456 else
2457 val |= 0x4010000;
2458
2459 tw32_f(MAC_SERDES_CFG, val);
2460 }
2461
2462 tw32_f(SG_DIG_CTRL, 0x01388400);
2463 udelay(40);
2464
2465 				/* Link parallel detection - link is up only
2466 				 * if we have PCS_SYNC and are not
2467 				 * receiving config code words. */
2468 mac_status = tr32(MAC_STATUS);
2469 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2470 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2471 tg3_setup_flow_control(tp, 0, 0);
2472 current_link_up = 1;
2473 }
2474 }
2475 }
2476 }
2477
2478out:
2479 return current_link_up;
2480}
2481
2482static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2483{
2484 int current_link_up = 0;
2485
2486 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2487 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2488 goto out;
2489 }
2490
2491 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2492 u32 flags;
2493 int i;
2494
2495 if (fiber_autoneg(tp, &flags)) {
2496 u32 local_adv, remote_adv;
2497
2498 local_adv = ADVERTISE_PAUSE_CAP;
2499 remote_adv = 0;
2500 if (flags & MR_LP_ADV_SYM_PAUSE)
2501 remote_adv |= LPA_PAUSE_CAP;
2502 if (flags & MR_LP_ADV_ASYM_PAUSE)
2503 remote_adv |= LPA_PAUSE_ASYM;
2504
2505 tg3_setup_flow_control(tp, local_adv, remote_adv);
2506
2507 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2508 current_link_up = 1;
2509 }
2510 for (i = 0; i < 30; i++) {
2511 udelay(20);
2512 tw32_f(MAC_STATUS,
2513 (MAC_STATUS_SYNC_CHANGED |
2514 MAC_STATUS_CFG_CHANGED));
2515 udelay(40);
2516 if ((tr32(MAC_STATUS) &
2517 (MAC_STATUS_SYNC_CHANGED |
2518 MAC_STATUS_CFG_CHANGED)) == 0)
2519 break;
2520 }
2521
2522 mac_status = tr32(MAC_STATUS);
2523 if (current_link_up == 0 &&
2524 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2525 !(mac_status & MAC_STATUS_RCVD_CFG))
2526 current_link_up = 1;
2527 } else {
2528 /* Forcing 1000FD link up. */
2529 current_link_up = 1;
2530 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2531
2532 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2533 udelay(40);
2534 }
2535
2536out:
2537 return current_link_up;
2538}
2539
2540static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2541{
2542 u32 orig_pause_cfg;
2543 u16 orig_active_speed;
2544 u8 orig_active_duplex;
2545 u32 mac_status;
2546 int current_link_up;
2547 int i;
2548
2549 orig_pause_cfg =
2550 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2551 TG3_FLAG_TX_PAUSE));
2552 orig_active_speed = tp->link_config.active_speed;
2553 orig_active_duplex = tp->link_config.active_duplex;
2554
2555 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2556 netif_carrier_ok(tp->dev) &&
2557 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2558 mac_status = tr32(MAC_STATUS);
2559 mac_status &= (MAC_STATUS_PCS_SYNCED |
2560 MAC_STATUS_SIGNAL_DET |
2561 MAC_STATUS_CFG_CHANGED |
2562 MAC_STATUS_RCVD_CFG);
2563 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2564 MAC_STATUS_SIGNAL_DET)) {
2565 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2566 MAC_STATUS_CFG_CHANGED));
2567 return 0;
2568 }
2569 }
2570
2571 tw32_f(MAC_TX_AUTO_NEG, 0);
2572
2573 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2574 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2575 tw32_f(MAC_MODE, tp->mac_mode);
2576 udelay(40);
2577
2578 if (tp->phy_id == PHY_ID_BCM8002)
2579 tg3_init_bcm8002(tp);
2580
2581 /* Enable link change event even when serdes polling. */
2582 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2583 udelay(40);
2584
2585 current_link_up = 0;
2586 mac_status = tr32(MAC_STATUS);
2587
2588 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2589 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2590 else
2591 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2592
2593 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2594 tw32_f(MAC_MODE, tp->mac_mode);
2595 udelay(40);
2596
2597 tp->hw_status->status =
2598 (SD_STATUS_UPDATED |
2599 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2600
2601 for (i = 0; i < 100; i++) {
2602 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2603 MAC_STATUS_CFG_CHANGED));
2604 udelay(5);
2605 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2606 MAC_STATUS_CFG_CHANGED)) == 0)
2607 break;
2608 }
2609
2610 mac_status = tr32(MAC_STATUS);
2611 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2612 current_link_up = 0;
2613 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2614 tw32_f(MAC_MODE, (tp->mac_mode |
2615 MAC_MODE_SEND_CONFIGS));
2616 udelay(1);
2617 tw32_f(MAC_MODE, tp->mac_mode);
2618 }
2619 }
2620
2621 if (current_link_up == 1) {
2622 tp->link_config.active_speed = SPEED_1000;
2623 tp->link_config.active_duplex = DUPLEX_FULL;
2624 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2625 LED_CTRL_LNKLED_OVERRIDE |
2626 LED_CTRL_1000MBPS_ON));
2627 } else {
2628 tp->link_config.active_speed = SPEED_INVALID;
2629 tp->link_config.active_duplex = DUPLEX_INVALID;
2630 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2631 LED_CTRL_LNKLED_OVERRIDE |
2632 LED_CTRL_TRAFFIC_OVERRIDE));
2633 }
2634
2635 if (current_link_up != netif_carrier_ok(tp->dev)) {
2636 if (current_link_up)
2637 netif_carrier_on(tp->dev);
2638 else
2639 netif_carrier_off(tp->dev);
2640 tg3_link_report(tp);
2641 } else {
2642 u32 now_pause_cfg =
2643 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2644 TG3_FLAG_TX_PAUSE);
2645 if (orig_pause_cfg != now_pause_cfg ||
2646 orig_active_speed != tp->link_config.active_speed ||
2647 orig_active_duplex != tp->link_config.active_duplex)
2648 tg3_link_report(tp);
2649 }
2650
2651 return 0;
2652}
2653
Michael Chan747e8f82005-07-25 12:33:22 -07002654static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2655{
2656 int current_link_up, err = 0;
2657 u32 bmsr, bmcr;
2658 u16 current_speed;
2659 u8 current_duplex;
2660
2661 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2662 tw32_f(MAC_MODE, tp->mac_mode);
2663 udelay(40);
2664
2665 tw32(MAC_EVENT, 0);
2666
2667 tw32_f(MAC_STATUS,
2668 (MAC_STATUS_SYNC_CHANGED |
2669 MAC_STATUS_CFG_CHANGED |
2670 MAC_STATUS_MI_COMPLETION |
2671 MAC_STATUS_LNKSTATE_CHANGED));
2672 udelay(40);
2673
2674 if (force_reset)
2675 tg3_phy_reset(tp);
2676
2677 current_link_up = 0;
2678 current_speed = SPEED_INVALID;
2679 current_duplex = DUPLEX_INVALID;
2680
2681 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2682 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2683
2684 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2685
2686 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2687 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2688 /* do nothing, just check for link up at the end */
2689 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2690 u32 adv, new_adv;
2691
2692 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2693 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2694 ADVERTISE_1000XPAUSE |
2695 ADVERTISE_1000XPSE_ASYM |
2696 ADVERTISE_SLCT);
2697
2698 /* Always advertise symmetric PAUSE just like copper */
2699 new_adv |= ADVERTISE_1000XPAUSE;
2700
2701 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2702 new_adv |= ADVERTISE_1000XHALF;
2703 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2704 new_adv |= ADVERTISE_1000XFULL;
2705
2706 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2707 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2708 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2709 tg3_writephy(tp, MII_BMCR, bmcr);
2710
2711 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2712 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2713 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2714
2715 return err;
2716 }
2717 } else {
2718 u32 new_bmcr;
2719
2720 bmcr &= ~BMCR_SPEED1000;
2721 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2722
2723 if (tp->link_config.duplex == DUPLEX_FULL)
2724 new_bmcr |= BMCR_FULLDPLX;
2725
2726 if (new_bmcr != bmcr) {
2727 /* BMCR_SPEED1000 is a reserved bit that needs
2728 * to be set on write.
2729 */
2730 new_bmcr |= BMCR_SPEED1000;
2731
2732 /* Force a linkdown */
2733 if (netif_carrier_ok(tp->dev)) {
2734 u32 adv;
2735
2736 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2737 adv &= ~(ADVERTISE_1000XFULL |
2738 ADVERTISE_1000XHALF |
2739 ADVERTISE_SLCT);
2740 tg3_writephy(tp, MII_ADVERTISE, adv);
2741 tg3_writephy(tp, MII_BMCR, bmcr |
2742 BMCR_ANRESTART |
2743 BMCR_ANENABLE);
2744 udelay(10);
2745 netif_carrier_off(tp->dev);
2746 }
2747 tg3_writephy(tp, MII_BMCR, new_bmcr);
2748 bmcr = new_bmcr;
2749 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2750 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2751 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2752 }
2753 }
2754
2755 if (bmsr & BMSR_LSTATUS) {
2756 current_speed = SPEED_1000;
2757 current_link_up = 1;
2758 if (bmcr & BMCR_FULLDPLX)
2759 current_duplex = DUPLEX_FULL;
2760 else
2761 current_duplex = DUPLEX_HALF;
2762
2763 if (bmcr & BMCR_ANENABLE) {
2764 u32 local_adv, remote_adv, common;
2765
2766 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2767 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2768 common = local_adv & remote_adv;
2769 if (common & (ADVERTISE_1000XHALF |
2770 ADVERTISE_1000XFULL)) {
2771 if (common & ADVERTISE_1000XFULL)
2772 current_duplex = DUPLEX_FULL;
2773 else
2774 current_duplex = DUPLEX_HALF;
2775
2776 tg3_setup_flow_control(tp, local_adv,
2777 remote_adv);
2778 }
2779 else
2780 current_link_up = 0;
2781 }
2782 }
2783
2784 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2785 if (tp->link_config.active_duplex == DUPLEX_HALF)
2786 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2787
2788 tw32_f(MAC_MODE, tp->mac_mode);
2789 udelay(40);
2790
2791 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2792
2793 tp->link_config.active_speed = current_speed;
2794 tp->link_config.active_duplex = current_duplex;
2795
2796 if (current_link_up != netif_carrier_ok(tp->dev)) {
2797 if (current_link_up)
2798 netif_carrier_on(tp->dev);
2799 else {
2800 netif_carrier_off(tp->dev);
2801 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2802 }
2803 tg3_link_report(tp);
2804 }
2805 return err;
2806}
2807
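/* Parallel detection for the MII serdes: if autoneg has not completed
 * but the PHY reports signal detect without incoming config code words,
 * force 1000/full; once config code words are seen again, re-enable
 * autoneg and clear the parallel-detect flag.
 */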
2808static void tg3_serdes_parallel_detect(struct tg3 *tp)
2809{
2810 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2811 /* Give autoneg time to complete. */
2812 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2813 return;
2814 }
2815 if (!netif_carrier_ok(tp->dev) &&
2816 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2817 u32 bmcr;
2818
2819 tg3_readphy(tp, MII_BMCR, &bmcr);
2820 if (bmcr & BMCR_ANENABLE) {
2821 u32 phy1, phy2;
2822
2823 /* Select shadow register 0x1f */
2824 tg3_writephy(tp, 0x1c, 0x7c00);
2825 tg3_readphy(tp, 0x1c, &phy1);
2826
2827 /* Select expansion interrupt status register */
2828 tg3_writephy(tp, 0x17, 0x0f01);
2829 tg3_readphy(tp, 0x15, &phy2);
2830 tg3_readphy(tp, 0x15, &phy2);
2831
2832 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2833 				/* We have signal detect and are not receiving
2834 				 * config code words; link is up by parallel
2835 				 * detection.
2836 */
2837
2838 bmcr &= ~BMCR_ANENABLE;
2839 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2840 tg3_writephy(tp, MII_BMCR, bmcr);
2841 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2842 }
2843 }
2844 }
2845 else if (netif_carrier_ok(tp->dev) &&
2846 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2847 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2848 u32 phy2;
2849
2850 /* Select expansion interrupt status register */
2851 tg3_writephy(tp, 0x17, 0x0f01);
2852 tg3_readphy(tp, 0x15, &phy2);
2853 if (phy2 & 0x20) {
2854 u32 bmcr;
2855
2856 /* Config code words received, turn on autoneg. */
2857 tg3_readphy(tp, MII_BMCR, &bmcr);
2858 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2859
2860 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2861
2862 }
2863 }
2864}
2865
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2867{
2868 int err;
2869
2870 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2871 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07002872 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2873 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 } else {
2875 err = tg3_setup_copper_phy(tp, force_reset);
2876 }
2877
2878 if (tp->link_config.active_speed == SPEED_1000 &&
2879 tp->link_config.active_duplex == DUPLEX_HALF)
2880 tw32(MAC_TX_LENGTHS,
2881 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2882 (6 << TX_LENGTHS_IPG_SHIFT) |
2883 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2884 else
2885 tw32(MAC_TX_LENGTHS,
2886 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2887 (6 << TX_LENGTHS_IPG_SHIFT) |
2888 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2889
2890 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2891 if (netif_carrier_ok(tp->dev)) {
2892 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07002893 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 } else {
2895 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2896 }
2897 }
2898
2899 return err;
2900}
2901
2902/* Tigon3 never reports partial packet sends. So we do not
2903 * need special logic to handle SKBs that have not had all
2904 * of their frags sent yet, like SunGEM does.
2905 */
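/* Reclaim completed TX descriptors: walk from tp->tx_cons up to the
 * hardware consumer index, unmap the linear head and each fragment,
 * free the skb, then wake the queue if it was stopped and enough
 * descriptors are free again.
 */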
2906static void tg3_tx(struct tg3 *tp)
2907{
2908 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2909 u32 sw_idx = tp->tx_cons;
2910
2911 while (sw_idx != hw_idx) {
2912 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2913 struct sk_buff *skb = ri->skb;
2914 int i;
2915
2916 if (unlikely(skb == NULL))
2917 BUG();
2918
2919 pci_unmap_single(tp->pdev,
2920 pci_unmap_addr(ri, mapping),
2921 skb_headlen(skb),
2922 PCI_DMA_TODEVICE);
2923
2924 ri->skb = NULL;
2925
2926 sw_idx = NEXT_TX(sw_idx);
2927
2928 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2929 if (unlikely(sw_idx == hw_idx))
2930 BUG();
2931
2932 ri = &tp->tx_buffers[sw_idx];
2933 if (unlikely(ri->skb != NULL))
2934 BUG();
2935
2936 pci_unmap_page(tp->pdev,
2937 pci_unmap_addr(ri, mapping),
2938 skb_shinfo(skb)->frags[i].size,
2939 PCI_DMA_TODEVICE);
2940
2941 sw_idx = NEXT_TX(sw_idx);
2942 }
2943
David S. Millerf47c11e2005-06-24 20:18:35 -07002944 dev_kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 }
2946
2947 tp->tx_cons = sw_idx;
2948
Michael Chan51b91462005-09-01 17:41:28 -07002949 if (unlikely(netif_queue_stopped(tp->dev))) {
2950 spin_lock(&tp->tx_lock);
2951 if (netif_queue_stopped(tp->dev) &&
2952 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2953 netif_wake_queue(tp->dev);
2954 spin_unlock(&tp->tx_lock);
2955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956}
2957
2958/* Returns size of skb allocated or < 0 on error.
2959 *
2960 * We only need to fill in the address because the other members
2961 * of the RX descriptor are invariant, see tg3_init_rings.
2962 *
2963  * Note the purposeful asymmetry of cpu vs. chip accesses. For
2964  * posting buffers we only dirty the first cache line of the RX
2965  * descriptor (containing the address). Whereas for the RX status
2966  * buffers the cpu only reads the last cache line of the RX descriptor
2967 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2968 */
2969static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2970 int src_idx, u32 dest_idx_unmasked)
2971{
2972 struct tg3_rx_buffer_desc *desc;
2973 struct ring_info *map, *src_map;
2974 struct sk_buff *skb;
2975 dma_addr_t mapping;
2976 int skb_size, dest_idx;
2977
2978 src_map = NULL;
2979 switch (opaque_key) {
2980 case RXD_OPAQUE_RING_STD:
2981 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2982 desc = &tp->rx_std[dest_idx];
2983 map = &tp->rx_std_buffers[dest_idx];
2984 if (src_idx >= 0)
2985 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07002986 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 break;
2988
2989 case RXD_OPAQUE_RING_JUMBO:
2990 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2991 desc = &tp->rx_jumbo[dest_idx];
2992 map = &tp->rx_jumbo_buffers[dest_idx];
2993 if (src_idx >= 0)
2994 src_map = &tp->rx_jumbo_buffers[src_idx];
2995 skb_size = RX_JUMBO_PKT_BUF_SZ;
2996 break;
2997
2998 default:
2999 return -EINVAL;
3000 	}
3001
3002 /* Do not overwrite any of the map or rp information
3003 * until we are sure we can commit to a new buffer.
3004 *
3005 * Callers depend upon this behavior and assume that
3006 * we leave everything unchanged if we fail.
3007 */
3008 skb = dev_alloc_skb(skb_size);
3009 if (skb == NULL)
3010 return -ENOMEM;
3011
3012 skb->dev = tp->dev;
3013 skb_reserve(skb, tp->rx_offset);
3014
3015 mapping = pci_map_single(tp->pdev, skb->data,
3016 skb_size - tp->rx_offset,
3017 PCI_DMA_FROMDEVICE);
3018
3019 map->skb = skb;
3020 pci_unmap_addr_set(map, mapping, mapping);
3021
3022 if (src_map != NULL)
3023 src_map->skb = NULL;
3024
3025 desc->addr_hi = ((u64)mapping >> 32);
3026 desc->addr_lo = ((u64)mapping & 0xffffffff);
3027
3028 return skb_size;
3029}
3030
3031/* We only need to copy the address over because the other
3032 * members of the RX descriptor are invariant. See notes above
3033 * tg3_alloc_rx_skb for full details.
3034 */
3035static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3036 int src_idx, u32 dest_idx_unmasked)
3037{
3038 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3039 struct ring_info *src_map, *dest_map;
3040 int dest_idx;
3041
3042 switch (opaque_key) {
3043 case RXD_OPAQUE_RING_STD:
3044 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3045 dest_desc = &tp->rx_std[dest_idx];
3046 dest_map = &tp->rx_std_buffers[dest_idx];
3047 src_desc = &tp->rx_std[src_idx];
3048 src_map = &tp->rx_std_buffers[src_idx];
3049 break;
3050
3051 case RXD_OPAQUE_RING_JUMBO:
3052 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3053 dest_desc = &tp->rx_jumbo[dest_idx];
3054 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3055 src_desc = &tp->rx_jumbo[src_idx];
3056 src_map = &tp->rx_jumbo_buffers[src_idx];
3057 break;
3058
3059 default:
3060 return;
3061 	}
3062
3063 dest_map->skb = src_map->skb;
3064 pci_unmap_addr_set(dest_map, mapping,
3065 pci_unmap_addr(src_map, mapping));
3066 dest_desc->addr_hi = src_desc->addr_hi;
3067 dest_desc->addr_lo = src_desc->addr_lo;
3068
3069 src_map->skb = NULL;
3070}
3071
3072#if TG3_VLAN_TAG_USED
3073static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3074{
3075 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3076}
3077#endif
3078
3079/* The RX ring scheme is composed of multiple rings which post fresh
3080 * buffers to the chip, and one special ring the chip uses to report
3081 * status back to the host.
3082 *
3083 * The special ring reports the status of received packets to the
3084 * host. The chip does not write into the original descriptor the
3085 * RX buffer was obtained from. The chip simply takes the original
3086 * descriptor as provided by the host, updates the status and length
3087 * field, then writes this into the next status ring entry.
3088 *
3089 * Each ring the host uses to post buffers to the chip is described
3090  * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
3091 * it is first placed into the on-chip ram. When the packet's length
3092 * is known, it walks down the TG3_BDINFO entries to select the ring.
3093 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3094 * which is within the range of the new packet's length is chosen.
3095 *
3096  * The "separate ring for rx status" scheme may sound odd, but it makes
3097 * sense from a cache coherency perspective. If only the host writes
3098 * to the buffer post rings, and only the chip writes to the rx status
3099 * rings, then cache lines never move beyond shared-modified state.
3100 * If both the host and chip were to write into the same ring, cache line
3101 * eviction could occur since both entities want it in an exclusive state.
3102 */
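/* In short: tg3_alloc_rx_skb() posts empty buffers on the standard or
 * jumbo producer ring, the chip DMAs received frames and writes
 * completions into the rx_rcb status ring, and tg3_rx() below walks the
 * status ring from the software index to the hardware producer index,
 * handing packets to the stack and refilling the producer rings.
 */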
3103static int tg3_rx(struct tg3 *tp, int budget)
3104{
3105 u32 work_mask;
Michael Chan483ba502005-04-25 15:14:03 -07003106 u32 sw_idx = tp->rx_rcb_ptr;
3107 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 int received;
3109
3110 hw_idx = tp->hw_status->idx[0].rx_producer;
3111 /*
3112 * We need to order the read of hw_idx and the read of
3113 * the opaque cookie.
3114 */
3115 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 work_mask = 0;
3117 received = 0;
3118 while (sw_idx != hw_idx && budget > 0) {
3119 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3120 unsigned int len;
3121 struct sk_buff *skb;
3122 dma_addr_t dma_addr;
3123 u32 opaque_key, desc_idx, *post_ptr;
3124
3125 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3126 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3127 if (opaque_key == RXD_OPAQUE_RING_STD) {
3128 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3129 mapping);
3130 skb = tp->rx_std_buffers[desc_idx].skb;
3131 post_ptr = &tp->rx_std_ptr;
3132 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3133 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3134 mapping);
3135 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3136 post_ptr = &tp->rx_jumbo_ptr;
3137 }
3138 else {
3139 goto next_pkt_nopost;
3140 }
3141
3142 work_mask |= opaque_key;
3143
3144 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3145 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3146 drop_it:
3147 tg3_recycle_rx(tp, opaque_key,
3148 desc_idx, *post_ptr);
3149 drop_it_no_recycle:
3150 /* Other statistics kept track of by card. */
3151 tp->net_stats.rx_dropped++;
3152 goto next_pkt;
3153 }
3154
3155 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3156
3157 if (len > RX_COPY_THRESHOLD
3158 && tp->rx_offset == 2
3159 /* rx_offset != 2 iff this is a 5701 card running
3160 * in PCI-X mode [see tg3_get_invariants()] */
3161 ) {
3162 int skb_size;
3163
3164 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3165 desc_idx, *post_ptr);
3166 if (skb_size < 0)
3167 goto drop_it;
3168
3169 pci_unmap_single(tp->pdev, dma_addr,
3170 skb_size - tp->rx_offset,
3171 PCI_DMA_FROMDEVICE);
3172
3173 skb_put(skb, len);
3174 } else {
3175 struct sk_buff *copy_skb;
3176
3177 tg3_recycle_rx(tp, opaque_key,
3178 desc_idx, *post_ptr);
3179
3180 copy_skb = dev_alloc_skb(len + 2);
3181 if (copy_skb == NULL)
3182 goto drop_it_no_recycle;
3183
3184 copy_skb->dev = tp->dev;
3185 skb_reserve(copy_skb, 2);
3186 skb_put(copy_skb, len);
3187 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3188 memcpy(copy_skb->data, skb->data, len);
3189 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3190
3191 /* We'll reuse the original ring buffer. */
3192 skb = copy_skb;
3193 }
3194
3195 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3196 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3197 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3198 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3199 skb->ip_summed = CHECKSUM_UNNECESSARY;
3200 else
3201 skb->ip_summed = CHECKSUM_NONE;
3202
3203 skb->protocol = eth_type_trans(skb, tp->dev);
3204#if TG3_VLAN_TAG_USED
3205 if (tp->vlgrp != NULL &&
3206 desc->type_flags & RXD_FLAG_VLAN) {
3207 tg3_vlan_rx(tp, skb,
3208 desc->err_vlan & RXD_VLAN_MASK);
3209 } else
3210#endif
3211 netif_receive_skb(skb);
3212
3213 tp->dev->last_rx = jiffies;
3214 received++;
3215 budget--;
3216
3217next_pkt:
3218 (*post_ptr)++;
3219next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003220 sw_idx++;
3221 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
Michael Chan52f6d692005-04-25 15:14:32 -07003222
3223 /* Refresh hw_idx to see if there is new work */
3224 if (sw_idx == hw_idx) {
3225 hw_idx = tp->hw_status->idx[0].rx_producer;
3226 rmb();
3227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 }
3229
3230 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003231 tp->rx_rcb_ptr = sw_idx;
3232 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
3234 /* Refill RX ring(s). */
3235 if (work_mask & RXD_OPAQUE_RING_STD) {
3236 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3237 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3238 sw_idx);
3239 }
3240 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3241 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3242 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3243 sw_idx);
3244 }
3245 mmiowb();
3246
3247 return received;
3248}
3249
3250static int tg3_poll(struct net_device *netdev, int *budget)
3251{
3252 struct tg3 *tp = netdev_priv(netdev);
3253 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 int done;
3255
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 /* handle link change and other phy events */
3257 if (!(tp->tg3_flags &
3258 (TG3_FLAG_USE_LINKCHG_REG |
3259 TG3_FLAG_POLL_SERDES))) {
3260 if (sblk->status & SD_STATUS_LINK_CHG) {
3261 sblk->status = SD_STATUS_UPDATED |
3262 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003263 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003265 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 }
3267 }
3268
3269 /* run TX completion thread */
3270 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 tg3_tx(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 }
3273
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 /* run RX thread, within the bounds set by NAPI.
3275 * All RX "locking" is done by ensuring outside
3276 * code synchronizes with dev->poll()
3277 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3279 int orig_budget = *budget;
3280 int work_done;
3281
3282 if (orig_budget > netdev->quota)
3283 orig_budget = netdev->quota;
3284
3285 work_done = tg3_rx(tp, orig_budget);
3286
3287 *budget -= work_done;
3288 netdev->quota -= work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 }
3290
Michael Chan38f38432005-09-05 17:53:32 -07003291 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
David S. Millerf7383c22005-05-18 22:50:53 -07003292 tp->last_tag = sblk->status_tag;
Michael Chan38f38432005-09-05 17:53:32 -07003293 rmb();
3294 } else
3295 sblk->status &= ~SD_STATUS_UPDATED;
David S. Millerf7383c22005-05-18 22:50:53 -07003296
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 /* if no more work, tell net stack and NIC we're done */
David S. Millerf7383c22005-05-18 22:50:53 -07003298 done = !tg3_has_work(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 if (done) {
David S. Millerf47c11e2005-06-24 20:18:35 -07003300 netif_rx_complete(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 tg3_restart_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 }
3303
3304 return (done ? 0 : 1);
3305}
3306
David S. Millerf47c11e2005-06-24 20:18:35 -07003307static void tg3_irq_quiesce(struct tg3 *tp)
3308{
3309 BUG_ON(tp->irq_sync);
3310
3311 tp->irq_sync = 1;
3312 smp_mb();
3313
3314 synchronize_irq(tp->pdev->irq);
3315}
3316
3317static inline int tg3_irq_sync(struct tg3 *tp)
3318{
3319 return tp->irq_sync;
3320}
3321
3322/* Fully shut down all tg3 driver activity elsewhere in the system.
3323 * If irq_sync is non-zero, the IRQ handler is synchronized (quiesced)
3324 * as well. Most of the time this is only necessary when shutting
3325 * down the device.
3326 */
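/* Typical usage, as in tg3_reset_task() below:
 *
 *	tg3_full_lock(tp, 1);	quiesce the IRQ handler, take both locks
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);	release tx_lock, then lock (re-enables BHs)
 */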
3327static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3328{
3329 if (irq_sync)
3330 tg3_irq_quiesce(tp);
3331 spin_lock_bh(&tp->lock);
3332 spin_lock(&tp->tx_lock);
3333}
3334
3335static inline void tg3_full_unlock(struct tg3 *tp)
3336{
3337 spin_unlock(&tp->tx_lock);
3338 spin_unlock_bh(&tp->lock);
3339}
3340
Michael Chan88b06bc22005-04-21 17:13:25 -07003341/* MSI ISR - No need to check for interrupt sharing and no need to
3342 * flush status block and interrupt mailbox. PCI ordering rules
3343 * guarantee that MSI will arrive after the status block.
3344 */
3345static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3346{
3347 struct net_device *dev = dev_id;
3348 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc22005-04-21 17:13:25 -07003349
Michael Chan61487482005-09-05 17:53:19 -07003350 prefetch(tp->hw_status);
3351 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc22005-04-21 17:13:25 -07003352 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003353 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc22005-04-21 17:13:25 -07003354 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003355	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc22005-04-21 17:13:25 -07003356 * NIC to stop sending us irqs, engaging "in-intr-handler"
3357 * event coalescing.
3358 */
3359 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07003360 if (likely(!tg3_irq_sync(tp)))
Michael Chan88b06bc22005-04-21 17:13:25 -07003361 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003362
Michael Chan88b06bc22005-04-21 17:13:25 -07003363 return IRQ_RETVAL(1);
3364}
3365
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3367{
3368 struct net_device *dev = dev_id;
3369 struct tg3 *tp = netdev_priv(dev);
3370 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 unsigned int handled = 1;
3372
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 /* In INTx mode, it is possible for the interrupt to arrive at
3374 * the CPU before the status block posted prior to the interrupt.
3375 * Reading the PCI State register will confirm whether the
3376 * interrupt is ours and will flush the status block.
3377 */
3378 if ((sblk->status & SD_STATUS_UPDATED) ||
3379 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3380 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003381 * Writing any value to intr-mbox-0 clears PCI INTA# and
3382 * chip-internal interrupt pending events.
3383 		 * Writing non-zero to intr-mbox-0 additionally tells the
3384 * NIC to stop sending us irqs, engaging "in-intr-handler"
3385 * event coalescing.
3386 */
3387 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3388 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003389 if (tg3_irq_sync(tp))
3390 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003391 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan61487482005-09-05 17:53:19 -07003392 if (likely(tg3_has_work(tp))) {
3393 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
David S. Millerfac9b832005-05-18 22:46:34 -07003394 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003395 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07003396 /* No work, shared interrupt perhaps? re-enable
3397 * interrupts, and flush that PCI write
3398 */
Michael Chan09ee9292005-08-09 20:17:00 -07003399 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003400 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07003401 }
3402 } else { /* shared interrupt */
3403 handled = 0;
3404 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003405out:
David S. Millerfac9b832005-05-18 22:46:34 -07003406 return IRQ_RETVAL(handled);
3407}
3408
3409static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3410{
3411 struct net_device *dev = dev_id;
3412 struct tg3 *tp = netdev_priv(dev);
3413 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003414 unsigned int handled = 1;
3415
David S. Millerfac9b832005-05-18 22:46:34 -07003416 /* In INTx mode, it is possible for the interrupt to arrive at
3417 * the CPU before the status block posted prior to the interrupt.
3418 * Reading the PCI State register will confirm whether the
3419 * interrupt is ours and will flush the status block.
3420 */
Michael Chan38f38432005-09-05 17:53:32 -07003421 if ((sblk->status_tag != tp->last_tag) ||
David S. Millerfac9b832005-05-18 22:46:34 -07003422 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3423 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 * writing any value to intr-mbox-0 clears PCI INTA# and
3425 * chip-internal interrupt pending events.
3426 		 * writing non-zero to intr-mbox-0 additionally tells the
3427 * NIC to stop sending us irqs, engaging "in-intr-handler"
3428 * event coalescing.
3429 */
3430 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3431 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003432 if (tg3_irq_sync(tp))
3433 goto out;
Michael Chan38f38432005-09-05 17:53:32 -07003434 if (netif_rx_schedule_prep(dev)) {
Michael Chan61487482005-09-05 17:53:19 -07003435 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan38f38432005-09-05 17:53:32 -07003436 /* Update last_tag to mark that this status has been
3437 			 * seen. Because the interrupt may be shared, we may be
3438 * racing with tg3_poll(), so only update last_tag
3439 * if tg3_poll() is not scheduled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440 */
Michael Chan38f38432005-09-05 17:53:32 -07003441 tp->last_tag = sblk->status_tag;
3442 __netif_rx_schedule(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 }
3444 } else { /* shared interrupt */
3445 handled = 0;
3446 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003447out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 return IRQ_RETVAL(handled);
3449}
3450
Michael Chan79381092005-04-21 17:13:59 -07003451/* ISR for interrupt test */
3452static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3453 struct pt_regs *regs)
3454{
3455 struct net_device *dev = dev_id;
3456 struct tg3 *tp = netdev_priv(dev);
3457 struct tg3_hw_status *sblk = tp->hw_status;
3458
Michael Chanf9804dd2005-09-27 12:13:10 -07003459 if ((sblk->status & SD_STATUS_UPDATED) ||
3460 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chan79381092005-04-21 17:13:59 -07003461 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3462 0x00000001);
3463 return IRQ_RETVAL(1);
3464 }
3465 return IRQ_RETVAL(0);
3466}
3467
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468static int tg3_init_hw(struct tg3 *);
Michael Chan944d9802005-05-29 14:57:48 -07003469static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470
3471#ifdef CONFIG_NET_POLL_CONTROLLER
3472static void tg3_poll_controller(struct net_device *dev)
3473{
Michael Chan88b06bc22005-04-21 17:13:25 -07003474 struct tg3 *tp = netdev_priv(dev);
3475
3476 tg3_interrupt(tp->pdev->irq, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477}
3478#endif
3479
3480static void tg3_reset_task(void *_data)
3481{
3482 struct tg3 *tp = _data;
3483 unsigned int restart_timer;
3484
Michael Chan7faa0062006-02-02 17:29:28 -08003485 tg3_full_lock(tp, 0);
3486 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3487
3488 if (!netif_running(tp->dev)) {
3489 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3490 tg3_full_unlock(tp);
3491 return;
3492 }
3493
3494 tg3_full_unlock(tp);
3495
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 tg3_netif_stop(tp);
3497
David S. Millerf47c11e2005-06-24 20:18:35 -07003498 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499
3500 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3501 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3502
Michael Chan944d9802005-05-29 14:57:48 -07003503 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 tg3_init_hw(tp);
3505
3506 tg3_netif_start(tp);
3507
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 if (restart_timer)
3509 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08003510
3511 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3512
3513 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514}
3515
3516static void tg3_tx_timeout(struct net_device *dev)
3517{
3518 struct tg3 *tp = netdev_priv(dev);
3519
3520 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3521 dev->name);
3522
3523 schedule_work(&tp->reset_task);
3524}
3525
Michael Chanc58ec932005-09-17 00:46:27 -07003526/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
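/* Roughly: 0xffffdcc0 is 0x2340 (9024) bytes below a 4GB boundary,
 * just over the largest (jumbo) frame this driver maps, so the first
 * test cheaply filters out bases that cannot possibly wrap; the second
 * test then checks whether base + len + 8 actually overflows 32 bits.
 */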
3527static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3528{
3529 u32 base = (u32) mapping & 0xffffffff;
3530
3531 return ((base > 0xffffdcc0) &&
3532 (base + len + 8 < base));
3533}
3534
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3536
3537static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07003538 u32 last_plus_one, u32 *start,
3539 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540{
3541 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
Michael Chanc58ec932005-09-17 00:46:27 -07003542 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07003544 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545
3546 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07003547 ret = -1;
3548 } else {
3549 /* New SKB is guaranteed to be linear. */
3550 entry = *start;
3551 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3552 PCI_DMA_TODEVICE);
3553 /* Make sure new skb does not cross any 4G boundaries.
3554 * Drop the packet if it does.
3555 */
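		/* skb_copy() returns a freshly allocated linear buffer, so the
		 * new mapping will usually land clear of the boundary; if it
		 * still straddles one, dropping the packet is preferred over
		 * tickling the 4GB DMA hardware bug.
		 */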
3556 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3557 ret = -1;
3558 dev_kfree_skb(new_skb);
3559 new_skb = NULL;
3560 } else {
3561 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3562 base_flags, 1 | (mss << 1));
3563 *start = NEXT_TX(entry);
3564 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 }
3566
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567 /* Now clean up the sw ring entries. */
3568 i = 0;
3569 while (entry != last_plus_one) {
3570 int len;
3571
3572 if (i == 0)
3573 len = skb_headlen(skb);
3574 else
3575 len = skb_shinfo(skb)->frags[i-1].size;
3576 pci_unmap_single(tp->pdev,
3577 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3578 len, PCI_DMA_TODEVICE);
3579 if (i == 0) {
3580 tp->tx_buffers[entry].skb = new_skb;
3581 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3582 } else {
3583 tp->tx_buffers[entry].skb = NULL;
3584 }
3585 entry = NEXT_TX(entry);
3586 i++;
3587 }
3588
3589 dev_kfree_skb(skb);
3590
Michael Chanc58ec932005-09-17 00:46:27 -07003591 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592}
3593
3594static void tg3_set_txd(struct tg3 *tp, int entry,
3595 dma_addr_t mapping, int len, u32 flags,
3596 u32 mss_and_is_end)
3597{
3598 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3599 int is_end = (mss_and_is_end & 0x1);
3600 u32 mss = (mss_and_is_end >> 1);
3601 u32 vlan_tag = 0;
3602
3603 if (is_end)
3604 flags |= TXD_FLAG_END;
3605 if (flags & TXD_FLAG_VLAN) {
3606 vlan_tag = flags >> 16;
3607 flags &= 0xffff;
3608 }
3609 vlan_tag |= (mss << TXD_MSS_SHIFT);
3610
3611 txd->addr_hi = ((u64) mapping >> 32);
3612 txd->addr_lo = ((u64) mapping & 0xffffffff);
3613 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3614 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3615}
3616
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3618{
3619 struct tg3 *tp = netdev_priv(dev);
3620 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 u32 len, entry, base_flags, mss;
3622 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623
3624 len = skb_headlen(skb);
3625
3626 /* No BH disabling for tx_lock here. We are running in BH disabled
3627 * context and TX reclaim runs via tp->poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07003628 * interrupt. Furthermore, IRQ processing runs lockless so we have
3629 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630 */
David S. Millerf47c11e2005-06-24 20:18:35 -07003631 if (!spin_trylock(&tp->tx_lock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 return NETDEV_TX_LOCKED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08003635 if (!netif_queue_stopped(dev)) {
3636 netif_stop_queue(dev);
3637
3638 /* This is a hard error, log it. */
3639 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3640 "queue awake!\n", dev->name);
3641 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003642 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 return NETDEV_TX_BUSY;
3644 }
3645
3646 entry = tp->tx_prod;
3647 base_flags = 0;
3648 if (skb->ip_summed == CHECKSUM_HW)
3649 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3650#if TG3_TSO_SUPPORT != 0
3651 mss = 0;
3652 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3653 (mss = skb_shinfo(skb)->tso_size) != 0) {
3654 int tcp_opt_len, ip_tcp_len;
3655
3656 if (skb_header_cloned(skb) &&
3657 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3658 dev_kfree_skb(skb);
3659 goto out_unlock;
3660 }
3661
3662 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3663 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3664
3665 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3666 TXD_FLAG_CPU_POST_DMA);
3667
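		/* Prepare the headers for TSO: tot_len is rewritten to a single
		 * segment's worth (mss + IP/TCP headers), and on chips without
		 * full hardware TSO the TCP checksum is seeded with the
		 * pseudo-header sum (length 0) so the hardware only has to fold
		 * in each segment's payload; HW_TSO parts compute it entirely
		 * themselves, hence check = 0 below.
		 */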
3668 skb->nh.iph->check = 0;
Alexey Dobriyanfd303332006-01-03 14:19:25 -08003669 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3671 skb->h.th->check = 0;
3672 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3673 }
3674 else {
3675 skb->h.th->check =
3676 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3677 skb->nh.iph->daddr,
3678 0, IPPROTO_TCP, 0);
3679 }
3680
3681 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3682 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3683 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3684 int tsflags;
3685
3686 tsflags = ((skb->nh.iph->ihl - 5) +
3687 (tcp_opt_len >> 2));
3688 mss |= (tsflags << 11);
3689 }
3690 } else {
3691 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3692 int tsflags;
3693
3694 tsflags = ((skb->nh.iph->ihl - 5) +
3695 (tcp_opt_len >> 2));
3696 base_flags |= tsflags << 12;
3697 }
3698 }
3699 }
3700#else
3701 mss = 0;
3702#endif
3703#if TG3_VLAN_TAG_USED
3704 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3705 base_flags |= (TXD_FLAG_VLAN |
3706 (vlan_tx_tag_get(skb) << 16));
3707#endif
3708
3709 /* Queue skb data, a.k.a. the main skb fragment. */
3710 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3711
3712 tp->tx_buffers[entry].skb = skb;
3713 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3714
3715 would_hit_hwbug = 0;
3716
3717 if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07003718 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719
3720 tg3_set_txd(tp, entry, mapping, len, base_flags,
3721 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3722
3723 entry = NEXT_TX(entry);
3724
3725 /* Now loop through additional data fragments, and queue them. */
3726 if (skb_shinfo(skb)->nr_frags > 0) {
3727 unsigned int i, last;
3728
3729 last = skb_shinfo(skb)->nr_frags - 1;
3730 for (i = 0; i <= last; i++) {
3731 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3732
3733 len = frag->size;
3734 mapping = pci_map_page(tp->pdev,
3735 frag->page,
3736 frag->page_offset,
3737 len, PCI_DMA_TODEVICE);
3738
3739 tp->tx_buffers[entry].skb = NULL;
3740 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3741
Michael Chanc58ec932005-09-17 00:46:27 -07003742 if (tg3_4g_overflow_test(mapping, len))
3743 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744
3745 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3746 tg3_set_txd(tp, entry, mapping, len,
3747 base_flags, (i == last)|(mss << 1));
3748 else
3749 tg3_set_txd(tp, entry, mapping, len,
3750 base_flags, (i == last));
3751
3752 entry = NEXT_TX(entry);
3753 }
3754 }
3755
3756 if (would_hit_hwbug) {
3757 u32 last_plus_one = entry;
3758 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759
Michael Chanc58ec932005-09-17 00:46:27 -07003760 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3761 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762
3763 /* If the workaround fails due to memory/mapping
3764 * failure, silently drop this packet.
3765 */
Michael Chanc58ec932005-09-17 00:46:27 -07003766 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3767 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 goto out_unlock;
3769
3770 entry = start;
3771 }
3772
3773 /* Packets are ready, update Tx producer idx local and on card. */
3774 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3775
3776 tp->tx_prod = entry;
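	/* Stop the queue once a worst-case packet (MAX_SKB_FRAGS + 1
	 * descriptors) might no longer fit; the immediate re-check below lets
	 * us wake it right back up if tx completion freed enough entries in
	 * the meantime.
	 */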
Michael Chan51b91462005-09-01 17:41:28 -07003777 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 netif_stop_queue(dev);
Michael Chan51b91462005-09-01 17:41:28 -07003779 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3780 netif_wake_queue(tp->dev);
3781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782
3783out_unlock:
3784 mmiowb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003785 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
3787 dev->trans_start = jiffies;
3788
3789 return NETDEV_TX_OK;
3790}
3791
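/* 5780-class parts handle a jumbo MTU by enlarging the standard rx
 * buffers (see tg3_init_rings()) instead of the separate jumbo ring,
 * and give up TSO while doing so; other chips simply toggle
 * TG3_FLAG_JUMBO_RING_ENABLE.
 */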
3792static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3793 int new_mtu)
3794{
3795 dev->mtu = new_mtu;
3796
Michael Chanef7f5ec2005-07-25 12:32:25 -07003797 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07003798 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07003799 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3800 ethtool_op_set_tso(dev, 0);
3801 }
3802 else
3803 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3804 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07003805 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07003806 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07003807 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07003808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809}
3810
3811static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3812{
3813 struct tg3 *tp = netdev_priv(dev);
3814
3815 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3816 return -EINVAL;
3817
3818 if (!netif_running(dev)) {
3819 /* We'll just catch it later when the
3820 * device is up'd.
3821 */
3822 tg3_set_mtu(dev, tp, new_mtu);
3823 return 0;
3824 }
3825
3826 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003827
3828 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829
Michael Chan944d9802005-05-29 14:57:48 -07003830 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
3832 tg3_set_mtu(dev, tp, new_mtu);
3833
3834 tg3_init_hw(tp);
3835
3836 tg3_netif_start(tp);
3837
David S. Millerf47c11e2005-06-24 20:18:35 -07003838 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839
3840 return 0;
3841}
3842
3843/* Free up pending packets in all rx/tx rings.
3844 *
3845 * The chip has been shut down and the driver detached from
3846 * the networking stack, so no interrupts or new tx packets will
3847 * end up in the driver. tp->{tx,}lock is not held and we are not
3848 * in an interrupt context and thus may sleep.
3849 */
3850static void tg3_free_rings(struct tg3 *tp)
3851{
3852 struct ring_info *rxp;
3853 int i;
3854
3855 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3856 rxp = &tp->rx_std_buffers[i];
3857
3858 if (rxp->skb == NULL)
3859 continue;
3860 pci_unmap_single(tp->pdev,
3861 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07003862 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 PCI_DMA_FROMDEVICE);
3864 dev_kfree_skb_any(rxp->skb);
3865 rxp->skb = NULL;
3866 }
3867
3868 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3869 rxp = &tp->rx_jumbo_buffers[i];
3870
3871 if (rxp->skb == NULL)
3872 continue;
3873 pci_unmap_single(tp->pdev,
3874 pci_unmap_addr(rxp, mapping),
3875 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3876 PCI_DMA_FROMDEVICE);
3877 dev_kfree_skb_any(rxp->skb);
3878 rxp->skb = NULL;
3879 }
3880
3881 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3882 struct tx_ring_info *txp;
3883 struct sk_buff *skb;
3884 int j;
3885
3886 txp = &tp->tx_buffers[i];
3887 skb = txp->skb;
3888
3889 if (skb == NULL) {
3890 i++;
3891 continue;
3892 }
3893
3894 pci_unmap_single(tp->pdev,
3895 pci_unmap_addr(txp, mapping),
3896 skb_headlen(skb),
3897 PCI_DMA_TODEVICE);
3898 txp->skb = NULL;
3899
3900 i++;
3901
3902 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3903 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3904 pci_unmap_page(tp->pdev,
3905 pci_unmap_addr(txp, mapping),
3906 skb_shinfo(skb)->frags[j].size,
3907 PCI_DMA_TODEVICE);
3908 i++;
3909 }
3910
3911 dev_kfree_skb_any(skb);
3912 }
3913}
3914
3915/* Initialize tx/rx rings for packet processing.
3916 *
3917 * The chip has been shut down and the driver detached from
3918 * the networking stack, so no interrupts or new tx packets will
3919 * end up in the driver. tp->{tx,}lock are held and thus
3920 * we may not sleep.
3921 */
3922static void tg3_init_rings(struct tg3 *tp)
3923{
3924 u32 i;
3925
3926 /* Free up all the SKBs. */
3927 tg3_free_rings(tp);
3928
3929 /* Zero out all descriptors. */
3930 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3931 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3932 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3933 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3934
Michael Chan7e72aad2005-07-25 12:31:17 -07003935 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07003936 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07003937 (tp->dev->mtu > ETH_DATA_LEN))
3938 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3939
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940	/* Initialize invariants of the rings; we only set this
3941 * stuff once. This works because the card does not
3942 * write into the rx buffer posting rings.
3943 */
3944 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3945 struct tg3_rx_buffer_desc *rxd;
3946
3947 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07003948 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 << RXD_LEN_SHIFT;
3950 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3951 rxd->opaque = (RXD_OPAQUE_RING_STD |
3952 (i << RXD_OPAQUE_INDEX_SHIFT));
3953 }
3954
Michael Chan0f893dc2005-07-25 12:30:38 -07003955 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3957 struct tg3_rx_buffer_desc *rxd;
3958
3959 rxd = &tp->rx_jumbo[i];
3960 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3961 << RXD_LEN_SHIFT;
3962 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3963 RXD_FLAG_JUMBO;
3964 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3965 (i << RXD_OPAQUE_INDEX_SHIFT));
3966 }
3967 }
3968
3969 /* Now allocate fresh SKBs for each rx ring. */
3970 for (i = 0; i < tp->rx_pending; i++) {
3971 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3972 -1, i) < 0)
3973 break;
3974 }
3975
Michael Chan0f893dc2005-07-25 12:30:38 -07003976 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3978 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3979 -1, i) < 0)
3980 break;
3981 }
3982 }
3983}
3984
3985/*
3986 * Must not be invoked with interrupt sources disabled and
3987 * the hardware shut down.
3988 */
3989static void tg3_free_consistent(struct tg3 *tp)
3990{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04003991 kfree(tp->rx_std_buffers);
3992 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 if (tp->rx_std) {
3994 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3995 tp->rx_std, tp->rx_std_mapping);
3996 tp->rx_std = NULL;
3997 }
3998 if (tp->rx_jumbo) {
3999 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4000 tp->rx_jumbo, tp->rx_jumbo_mapping);
4001 tp->rx_jumbo = NULL;
4002 }
4003 if (tp->rx_rcb) {
4004 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4005 tp->rx_rcb, tp->rx_rcb_mapping);
4006 tp->rx_rcb = NULL;
4007 }
4008 if (tp->tx_ring) {
4009 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4010 tp->tx_ring, tp->tx_desc_mapping);
4011 tp->tx_ring = NULL;
4012 }
4013 if (tp->hw_status) {
4014 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4015 tp->hw_status, tp->status_mapping);
4016 tp->hw_status = NULL;
4017 }
4018 if (tp->hw_stats) {
4019 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4020 tp->hw_stats, tp->stats_mapping);
4021 tp->hw_stats = NULL;
4022 }
4023}
4024
4025/*
4026 * Must not be invoked with interrupt sources disabled and
4027 * the hardware shut down. Can sleep.
4028 */
4029static int tg3_alloc_consistent(struct tg3 *tp)
4030{
4031 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4032 (TG3_RX_RING_SIZE +
4033 TG3_RX_JUMBO_RING_SIZE)) +
4034 (sizeof(struct tx_ring_info) *
4035 TG3_TX_RING_SIZE),
4036 GFP_KERNEL);
4037 if (!tp->rx_std_buffers)
4038 return -ENOMEM;
4039
4040 memset(tp->rx_std_buffers, 0,
4041 (sizeof(struct ring_info) *
4042 (TG3_RX_RING_SIZE +
4043 TG3_RX_JUMBO_RING_SIZE)) +
4044 (sizeof(struct tx_ring_info) *
4045 TG3_TX_RING_SIZE));
4046
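	/* The single allocation above is carved into three consecutive
	 * arrays: standard rx ring info, jumbo rx ring info, then tx ring
	 * info.
	 */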
4047 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4048 tp->tx_buffers = (struct tx_ring_info *)
4049 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4050
4051 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4052 &tp->rx_std_mapping);
4053 if (!tp->rx_std)
4054 goto err_out;
4055
4056 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4057 &tp->rx_jumbo_mapping);
4058
4059 if (!tp->rx_jumbo)
4060 goto err_out;
4061
4062 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4063 &tp->rx_rcb_mapping);
4064 if (!tp->rx_rcb)
4065 goto err_out;
4066
4067 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4068 &tp->tx_desc_mapping);
4069 if (!tp->tx_ring)
4070 goto err_out;
4071
4072 tp->hw_status = pci_alloc_consistent(tp->pdev,
4073 TG3_HW_STATUS_SIZE,
4074 &tp->status_mapping);
4075 if (!tp->hw_status)
4076 goto err_out;
4077
4078 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4079 sizeof(struct tg3_hw_stats),
4080 &tp->stats_mapping);
4081 if (!tp->hw_stats)
4082 goto err_out;
4083
4084 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4085 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4086
4087 return 0;
4088
4089err_out:
4090 tg3_free_consistent(tp);
4091 return -ENOMEM;
4092}
4093
4094#define MAX_WAIT_CNT 1000
4095
4096/* To stop a block, clear the enable bit and poll till it
4097 * clears. tp->lock is held.
4098 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004099static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100{
4101 unsigned int i;
4102 u32 val;
4103
4104 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4105 switch (ofs) {
4106 case RCVLSC_MODE:
4107 case DMAC_MODE:
4108 case MBFREE_MODE:
4109 case BUFMGR_MODE:
4110 case MEMARB_MODE:
4111 /* We can't enable/disable these bits of the
4112		 * 5705/5750, so just say success.
4113 */
4114 return 0;
4115
4116 default:
4117 break;
4118 };
4119 }
4120
4121 val = tr32(ofs);
4122 val &= ~enable_bit;
4123 tw32_f(ofs, val);
4124
4125 for (i = 0; i < MAX_WAIT_CNT; i++) {
4126 udelay(100);
4127 val = tr32(ofs);
4128 if ((val & enable_bit) == 0)
4129 break;
4130 }
4131
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004132 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4134 "ofs=%lx enable_bit=%x\n",
4135 ofs, enable_bit);
4136 return -ENODEV;
4137 }
4138
4139 return 0;
4140}
4141
4142/* tp->lock is held. */
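/* Quiesce the chip: receive is switched off first, then each DMA, send
 * and receive block is stopped via tg3_stop_block(), and finally the
 * status block and statistics are cleared.  Errors are OR-ed together
 * so one stuck block does not abort the rest of the shutdown.
 */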
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004143static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144{
4145 int i, err;
4146
4147 tg3_disable_ints(tp);
4148
4149 tp->rx_mode &= ~RX_MODE_ENABLE;
4150 tw32_f(MAC_RX_MODE, tp->rx_mode);
4151 udelay(10);
4152
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004153 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4154 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4155 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4156 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4157 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4158 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004160 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4161 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4162 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4163 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4164 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4165 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4166 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167
4168 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4169 tw32_f(MAC_MODE, tp->mac_mode);
4170 udelay(40);
4171
4172 tp->tx_mode &= ~TX_MODE_ENABLE;
4173 tw32_f(MAC_TX_MODE, tp->tx_mode);
4174
4175 for (i = 0; i < MAX_WAIT_CNT; i++) {
4176 udelay(100);
4177 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4178 break;
4179 }
4180 if (i >= MAX_WAIT_CNT) {
4181 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4182 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4183 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07004184 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 }
4186
Michael Chane6de8ad2005-05-05 14:42:41 -07004187 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004188 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4189 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190
4191 tw32(FTQ_RESET, 0xffffffff);
4192 tw32(FTQ_RESET, 0x00000000);
4193
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004194 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4195 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196
4197 if (tp->hw_status)
4198 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4199 if (tp->hw_stats)
4200 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 return err;
4203}
4204
4205/* tp->lock is held. */
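/* Grab the NVRAM hardware arbitration semaphore (SWARB request/grant 1)
 * on first use and count nested acquisitions in tp->nvram_lock_cnt so
 * that only the outermost tg3_nvram_unlock() releases it.
 */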
4206static int tg3_nvram_lock(struct tg3 *tp)
4207{
4208 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4209 int i;
4210
Michael Chanec41c7d2006-01-17 02:40:55 -08004211 if (tp->nvram_lock_cnt == 0) {
4212 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4213 for (i = 0; i < 8000; i++) {
4214 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4215 break;
4216 udelay(20);
4217 }
4218 if (i == 8000) {
4219 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4220 return -ENODEV;
4221 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 }
Michael Chanec41c7d2006-01-17 02:40:55 -08004223 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 }
4225 return 0;
4226}
4227
4228/* tp->lock is held. */
4229static void tg3_nvram_unlock(struct tg3 *tp)
4230{
Michael Chanec41c7d2006-01-17 02:40:55 -08004231 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4232 if (tp->nvram_lock_cnt > 0)
4233 tp->nvram_lock_cnt--;
4234 if (tp->nvram_lock_cnt == 0)
4235 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237}
4238
4239/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07004240static void tg3_enable_nvram_access(struct tg3 *tp)
4241{
4242 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4243 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4244 u32 nvaccess = tr32(NVRAM_ACCESS);
4245
4246 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4247 }
4248}
4249
4250/* tp->lock is held. */
4251static void tg3_disable_nvram_access(struct tg3 *tp)
4252{
4253 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4254 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4255 u32 nvaccess = tr32(NVRAM_ACCESS);
4256
4257 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4258 }
4259}
4260
4261/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4263{
4264 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4265 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4266 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4267
4268 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4269 switch (kind) {
4270 case RESET_KIND_INIT:
4271 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4272 DRV_STATE_START);
4273 break;
4274
4275 case RESET_KIND_SHUTDOWN:
4276 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4277 DRV_STATE_UNLOAD);
4278 break;
4279
4280 case RESET_KIND_SUSPEND:
4281 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4282 DRV_STATE_SUSPEND);
4283 break;
4284
4285 default:
4286 break;
4287 };
4288 }
4289}
4290
4291/* tp->lock is held. */
4292static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4293{
4294 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4295 switch (kind) {
4296 case RESET_KIND_INIT:
4297 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4298 DRV_STATE_START_DONE);
4299 break;
4300
4301 case RESET_KIND_SHUTDOWN:
4302 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4303 DRV_STATE_UNLOAD_DONE);
4304 break;
4305
4306 default:
4307 break;
4308 };
4309 }
4310}
4311
4312/* tp->lock is held. */
4313static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4314{
4315 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4316 switch (kind) {
4317 case RESET_KIND_INIT:
4318 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4319 DRV_STATE_START);
4320 break;
4321
4322 case RESET_KIND_SHUTDOWN:
4323 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4324 DRV_STATE_UNLOAD);
4325 break;
4326
4327 case RESET_KIND_SUSPEND:
4328 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4329 DRV_STATE_SUSPEND);
4330 break;
4331
4332 default:
4333 break;
4334 };
4335 }
4336}
4337
4338static void tg3_stop_fw(struct tg3 *);
4339
4340/* tp->lock is held. */
4341static int tg3_chip_reset(struct tg3 *tp)
4342{
4343 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07004344 void (*write_op)(struct tg3 *, u32, u32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 int i;
4346
Michael Chanec41c7d2006-01-17 02:40:55 -08004347 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 tg3_nvram_lock(tp);
Michael Chanec41c7d2006-01-17 02:40:55 -08004349 /* No matching tg3_nvram_unlock() after this because
4350 * chip reset below will undo the nvram lock.
4351 */
4352 tp->nvram_lock_cnt = 0;
4353 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354
4355 /*
4356 * We must avoid the readl() that normally takes place.
4357 * It locks machines, causes machine checks, and other
4358 * fun things. So, temporarily disable the 5701
4359 * hardware workaround, while we do the reset.
4360 */
Michael Chan1ee582d2005-08-09 20:16:46 -07004361 write_op = tp->write32;
4362 if (write_op == tg3_write_flush_reg32)
4363 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364
4365 /* do the reset */
4366 val = GRC_MISC_CFG_CORECLK_RESET;
4367
4368 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4369 if (tr32(0x7e2c) == 0x60) {
4370 tw32(0x7e2c, 0x20);
4371 }
4372 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4373 tw32(GRC_MISC_CFG, (1 << 29));
4374 val |= (1 << 29);
4375 }
4376 }
4377
4378 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4379 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4380 tw32(GRC_MISC_CFG, val);
4381
Michael Chan1ee582d2005-08-09 20:16:46 -07004382 /* restore 5701 hardware bug workaround write method */
4383 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384
4385 /* Unfortunately, we have to delay before the PCI read back.
4386	 * Some 575X chips will not even respond to a PCI cfg access
4387 * when the reset command is given to the chip.
4388 *
4389 * How do these hardware designers expect things to work
4390 * properly if the PCI write is posted for a long period
4391 * of time? It is always necessary to have some method by
4392	 * which a register read back can occur to push out the
4393	 * write which does the reset.
4394 *
4395 * For most tg3 variants the trick below was working.
4396 * Ho hum...
4397 */
4398 udelay(120);
4399
4400 /* Flush PCI posted writes. The normal MMIO registers
4401 * are inaccessible at this time so this is the only
4402	 * way to do this reliably (actually, this is no longer
4403 * the case, see above). I tried to use indirect
4404 * register read/write but this upset some 5701 variants.
4405 */
4406 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4407
4408 udelay(120);
4409
4410 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4411 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4412 int i;
4413 u32 cfg_val;
4414
4415 /* Wait for link training to complete. */
4416 for (i = 0; i < 5000; i++)
4417 udelay(100);
4418
4419 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4420 pci_write_config_dword(tp->pdev, 0xc4,
4421 cfg_val | (1 << 15));
4422 }
4423 /* Set PCIE max payload size and clear error status. */
4424 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4425 }
4426
4427 /* Re-enable indirect register accesses. */
4428 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4429 tp->misc_host_ctrl);
4430
4431 /* Set MAX PCI retry to zero. */
4432 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4433 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4434 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4435 val |= PCISTATE_RETRY_SAME_DMA;
4436 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4437
4438 pci_restore_state(tp->pdev);
4439
4440 /* Make sure PCI-X relaxed ordering bit is clear. */
4441 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4442 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4443 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4444
Michael Chana4e2b342005-10-26 15:46:52 -07004445 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chan4cf78e42005-07-25 12:29:19 -07004446 u32 val;
4447
4448 /* Chip reset on 5780 will reset MSI enable bit,
4449		 * so we need to restore it.
4450 */
4451 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4452 u16 ctrl;
4453
4454 pci_read_config_word(tp->pdev,
4455 tp->msi_cap + PCI_MSI_FLAGS,
4456 &ctrl);
4457 pci_write_config_word(tp->pdev,
4458 tp->msi_cap + PCI_MSI_FLAGS,
4459 ctrl | PCI_MSI_FLAGS_ENABLE);
4460 val = tr32(MSGINT_MODE);
4461 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4462 }
4463
4464 val = tr32(MEMARB_MODE);
4465 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4466
4467 } else
4468 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469
4470 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4471 tg3_stop_fw(tp);
4472 tw32(0x5000, 0x400);
4473 }
4474
4475 tw32(GRC_MODE, tp->grc_mode);
4476
4477 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4478 u32 val = tr32(0xc4);
4479
4480 tw32(0xc4, val | (1 << 15));
4481 }
4482
4483 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4485 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4486 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4487 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4488 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4489 }
4490
4491 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4492 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4493 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07004494 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4495 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4496 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 } else
4498 tw32_f(MAC_MODE, 0);
4499 udelay(40);
4500
4501 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4502 /* Wait for firmware initialization to complete. */
4503 for (i = 0; i < 100000; i++) {
4504 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4505 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4506 break;
4507 udelay(10);
4508 }
4509 if (i >= 100000) {
4510 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4511 "firmware will not restart magic=%08x\n",
4512 tp->dev->name, val);
4513 return -ENODEV;
4514 }
4515 }
4516
4517 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4518 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4519 u32 val = tr32(0x7c00);
4520
4521 tw32(0x7c00, val | (1 << 25));
4522 }
4523
4524 /* Reprobe ASF enable state. */
4525 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4526 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4527 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4528 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4529 u32 nic_cfg;
4530
4531 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4532 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4533 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07004534 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4536 }
4537 }
4538
4539 return 0;
4540}
4541
4542/* tp->lock is held. */
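/* Ask the ASF/management firmware to pause: post FWCMD_NICDRV_PAUSE_FW
 * in the firmware command mailbox, raise the RX CPU event bit and give
 * the firmware a brief window to acknowledge it.
 */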
4543static void tg3_stop_fw(struct tg3 *tp)
4544{
4545 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4546 u32 val;
4547 int i;
4548
4549 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4550 val = tr32(GRC_RX_CPU_EVENT);
4551 val |= (1 << 14);
4552 tw32(GRC_RX_CPU_EVENT, val);
4553
4554 /* Wait for RX cpu to ACK the event. */
4555 for (i = 0; i < 100; i++) {
4556 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4557 break;
4558 udelay(1);
4559 }
4560 }
4561}
4562
4563/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07004564static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565{
4566 int err;
4567
4568 tg3_stop_fw(tp);
4569
Michael Chan944d9802005-05-29 14:57:48 -07004570 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004572 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573 err = tg3_chip_reset(tp);
4574
Michael Chan944d9802005-05-29 14:57:48 -07004575 tg3_write_sig_legacy(tp, kind);
4576 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577
4578 if (err)
4579 return err;
4580
4581 return 0;
4582}
4583
4584#define TG3_FW_RELEASE_MAJOR 0x0
4585#define TG3_FW_RELASE_MINOR 0x0
4586#define TG3_FW_RELEASE_FIX 0x0
4587#define TG3_FW_START_ADDR 0x08000000
4588#define TG3_FW_TEXT_ADDR 0x08000000
4589#define TG3_FW_TEXT_LEN 0x9c0
4590#define TG3_FW_RODATA_ADDR 0x080009c0
4591#define TG3_FW_RODATA_LEN 0x60
4592#define TG3_FW_DATA_ADDR 0x08000a40
4593#define TG3_FW_DATA_LEN 0x20
4594#define TG3_FW_SBSS_ADDR 0x08000a60
4595#define TG3_FW_SBSS_LEN 0xc
4596#define TG3_FW_BSS_ADDR 0x08000a70
4597#define TG3_FW_BSS_LEN 0x10
4598
4599static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4600 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4601 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4602 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4603 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4604 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4605 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4606 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4607 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4608 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4609 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4610 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4611 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4612 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4613 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4614 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4615 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4616 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4617 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4618 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4619 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4620 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4621 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4622 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4623 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4624 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4625 0, 0, 0, 0, 0, 0,
4626 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4627 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4628 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4629 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4630 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4631 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4632 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4633 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4634 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4635 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4636 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4637 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4638 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4639 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4640 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4641 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4642 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4643 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4644 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4645 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4646 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4647 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4648 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4649 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4650 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4651 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4652 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4653 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4654 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4655 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4656 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4657 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4658 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4659 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4660 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4661 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4662 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4663 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4664 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4665 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4666 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4667 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4668 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4669 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4670 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4671 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4672 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4673 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4674 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4675 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4676 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4677 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4678 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4679 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4680 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4681 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4682 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4683 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4684 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4685 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4686 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4687 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4688 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4689 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4690 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4691};
4692
4693static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4694 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4695 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4696 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4697 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4698 0x00000000
4699};
4700
4701#if 0 /* All zeros, don't eat up space with it. */
4702u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4703 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4704 0x00000000, 0x00000000, 0x00000000, 0x00000000
4705};
4706#endif
4707
4708#define RX_CPU_SCRATCH_BASE 0x30000
4709#define RX_CPU_SCRATCH_SIZE 0x04000
4710#define TX_CPU_SCRATCH_BASE 0x34000
4711#define TX_CPU_SCRATCH_SIZE 0x04000
4712
4713/* tp->lock is held. */
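/* Force the on-chip RX or TX MIPS CPU into the halted state by
 * repeatedly writing CPU_MODE_HALT until it sticks, then drop any NVRAM
 * arbitration the boot firmware may still have been holding.
 */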
4714static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4715{
4716 int i;
4717
4718 if (offset == TX_CPU_BASE &&
4719 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4720 BUG();
4721
4722 if (offset == RX_CPU_BASE) {
4723 for (i = 0; i < 10000; i++) {
4724 tw32(offset + CPU_STATE, 0xffffffff);
4725 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4726 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4727 break;
4728 }
4729
4730 tw32(offset + CPU_STATE, 0xffffffff);
4731 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4732 udelay(10);
4733 } else {
4734 for (i = 0; i < 10000; i++) {
4735 tw32(offset + CPU_STATE, 0xffffffff);
4736 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4737 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4738 break;
4739 }
4740 }
4741
4742 if (i >= 10000) {
4743 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4744 "and %s CPU\n",
4745 tp->dev->name,
4746 (offset == RX_CPU_BASE ? "RX" : "TX"));
4747 return -ENODEV;
4748 }
Michael Chanec41c7d2006-01-17 02:40:55 -08004749
4750 /* Clear firmware's nvram arbitration. */
4751 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4752 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753 return 0;
4754}
4755
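/* Describes one firmware image to be copied into CPU scratch memory:
 * text, read-only data and data segments, each with its link-time base
 * address, length and (possibly NULL, meaning zero-fill) payload.
 */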
4756struct fw_info {
4757 unsigned int text_base;
4758 unsigned int text_len;
4759 u32 *text_data;
4760 unsigned int rodata_base;
4761 unsigned int rodata_len;
4762 u32 *rodata_data;
4763 unsigned int data_base;
4764 unsigned int data_len;
4765 u32 *data_data;
4766};
4767
4768/* tp->lock is held. */
4769static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4770 int cpu_scratch_size, struct fw_info *info)
4771{
Michael Chanec41c7d2006-01-17 02:40:55 -08004772 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 void (*write_op)(struct tg3 *, u32, u32);
4774
4775 if (cpu_base == TX_CPU_BASE &&
4776 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4777 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4778 "TX cpu firmware on %s which is 5705.\n",
4779 tp->dev->name);
4780 return -EINVAL;
4781 }
4782
4783 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4784 write_op = tg3_write_mem;
4785 else
4786 write_op = tg3_write_indirect_reg32;
4787
Michael Chan1b628152005-05-29 14:59:49 -07004788 /* It is possible that bootcode is still loading at this point.
4789 * Get the nvram lock first before halting the cpu.
4790 */
Michael Chanec41c7d2006-01-17 02:40:55 -08004791 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08004793 if (!lock_err)
4794 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 if (err)
4796 goto out;
4797
4798 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4799 write_op(tp, cpu_scratch_base + i, 0);
4800 tw32(cpu_base + CPU_STATE, 0xffffffff);
4801 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4802 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4803 write_op(tp, (cpu_scratch_base +
4804 (info->text_base & 0xffff) +
4805 (i * sizeof(u32))),
4806 (info->text_data ?
4807 info->text_data[i] : 0));
4808 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4809 write_op(tp, (cpu_scratch_base +
4810 (info->rodata_base & 0xffff) +
4811 (i * sizeof(u32))),
4812 (info->rodata_data ?
4813 info->rodata_data[i] : 0));
4814 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4815 write_op(tp, (cpu_scratch_base +
4816 (info->data_base & 0xffff) +
4817 (i * sizeof(u32))),
4818 (info->data_data ?
4819 info->data_data[i] : 0));
4820
4821 err = 0;
4822
4823out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824 return err;
4825}
4826
4827/* tp->lock is held. */
4828static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4829{
4830 struct fw_info info;
4831 int err, i;
4832
4833 info.text_base = TG3_FW_TEXT_ADDR;
4834 info.text_len = TG3_FW_TEXT_LEN;
4835 info.text_data = &tg3FwText[0];
4836 info.rodata_base = TG3_FW_RODATA_ADDR;
4837 info.rodata_len = TG3_FW_RODATA_LEN;
4838 info.rodata_data = &tg3FwRodata[0];
4839 info.data_base = TG3_FW_DATA_ADDR;
4840 info.data_len = TG3_FW_DATA_LEN;
4841 info.data_data = NULL;
4842
4843 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4844 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4845 &info);
4846 if (err)
4847 return err;
4848
4849 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4850 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4851 &info);
4852 if (err)
4853 return err;
4854
4855 /* Now startup only the RX cpu. */
4856 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4857 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4858
4859 for (i = 0; i < 5; i++) {
4860 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4861 break;
4862 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4863 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4864 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4865 udelay(1000);
4866 }
4867 if (i >= 5) {
4868 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4869 "to set RX CPU PC, is %08x should be %08x\n",
4870 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4871 TG3_FW_TEXT_ADDR);
4872 return -ENODEV;
4873 }
4874 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4875 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4876
4877 return 0;
4878}
4879
4880#if TG3_TSO_SUPPORT != 0
4881
4882#define TG3_TSO_FW_RELEASE_MAJOR 0x1
4883#define TG3_TSO_FW_RELASE_MINOR 0x6
4884#define TG3_TSO_FW_RELEASE_FIX 0x0
4885#define TG3_TSO_FW_START_ADDR 0x08000000
4886#define TG3_TSO_FW_TEXT_ADDR 0x08000000
4887#define TG3_TSO_FW_TEXT_LEN 0x1aa0
4888#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4889#define TG3_TSO_FW_RODATA_LEN 0x60
4890#define TG3_TSO_FW_DATA_ADDR 0x08001b20
4891#define TG3_TSO_FW_DATA_LEN 0x30
4892#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4893#define TG3_TSO_FW_SBSS_LEN 0x2c
4894#define TG3_TSO_FW_BSS_ADDR 0x08001b80
4895#define TG3_TSO_FW_BSS_LEN 0x894
4896
4897static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4898 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4899 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4900 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4901 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4902 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4903 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4904 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4905 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4906 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4907 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4908 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4909 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4910 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4911 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4912 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4913 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4914 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4915 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4916 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4917 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4918 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4919 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4920 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4921 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4922 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4923 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4924 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4925 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4926 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4927 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4928 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4929 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4930 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4931 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4932 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4933 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4934 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4935 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4936 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4937 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4938 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4939 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4940 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4941 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4942 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4943 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4944 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4945 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4946 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4947 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4948 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4949 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4950 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4951 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4952 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4953 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4954 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4955 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4956 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4957 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4958 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4959 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4960 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4961 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4962 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4963 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4964 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4965 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4966 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4967 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4968 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4969 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4970 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4971 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4972 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4973 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4974 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4975 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4976 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4977 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4978 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4979 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4980 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4981 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4982 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4983 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4984 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4985 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4986 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4987 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4988 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4989 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4990 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4991 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4992 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4993 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4994 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4995 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4996 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4997 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4998 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4999 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5000 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5001 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5002 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5003 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5004 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5005 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5006 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5007 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5008 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5009 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5010 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5011 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5012 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5013 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5014 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5015 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5016 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5017 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5018 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5019 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5020 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5021 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5022 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5023 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5024 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5025 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5026 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5027 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5028 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5029 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5030 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5031 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5032 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5033 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5034 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5035 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5036 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5037 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5038 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5039 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5040 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5041 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5042 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5043 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5044 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5045 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5046 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5047 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5048 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5049 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5050 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5051 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5052 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5053 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5054 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5055 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5056 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5057 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5058 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5059 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5060 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5061 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5062 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5063 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5064 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5065 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5066 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5067 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5068 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5069 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5070 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5071 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5072 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5073 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5074 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5075 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5076 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5077 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5078 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5079 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5080 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5081 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5082 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5083 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5084 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5085 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5086 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5087 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5088 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5089 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5090 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5091 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5092 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5093 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5094 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5095 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5096 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5097 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5098 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5099 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5100 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5101 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5102 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5103 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5104 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5105 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5106 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5107 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5108 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5109 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5110 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5111 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5112 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5113 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5114 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5115 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5116 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5117 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5118 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5119 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5120 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5121 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5122 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5123 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5124 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5125 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5126 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5127 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5128 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5129 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5130 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5131 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5132 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5133 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5134 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5135 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5136 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5137 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5138 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5139 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5140 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5141 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5142 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5143 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5144 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5145 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5146 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5147 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5148 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5149 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5150 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5151 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5152 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5153 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5154 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5155 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5156 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5157 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5158 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5159 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5160 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5161 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5162 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5163 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5164 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5165 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5166 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5167 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5168 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5169 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5170 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5171 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5172 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5173 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5174 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5175 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5176 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5177 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5178 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5179 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5180 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5181 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5182};
5183
5184static u32 tg3TsoFwRodata[] = {
5185 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5186 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5187 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5188 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5189 0x00000000,
5190};
5191
5192static u32 tg3TsoFwData[] = {
5193 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5194 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5195 0x00000000,
5196};
5197
5198/* 5705 needs a special version of the TSO firmware. */
5199#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5200#define TG3_TSO5_FW_RELASE_MINOR 0x2
5201#define TG3_TSO5_FW_RELEASE_FIX 0x0
5202#define TG3_TSO5_FW_START_ADDR 0x00010000
5203#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5204#define TG3_TSO5_FW_TEXT_LEN 0xe90
5205#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5206#define TG3_TSO5_FW_RODATA_LEN 0x50
5207#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5208#define TG3_TSO5_FW_DATA_LEN 0x20
5209#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5210#define TG3_TSO5_FW_SBSS_LEN 0x28
5211#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5212#define TG3_TSO5_FW_BSS_LEN 0x88
5213
5214static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5215 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5216 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5217 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5218 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5219 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5220 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5221 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5222 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5223 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5224 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5225 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5226 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5227 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5228 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5229 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5230 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5231 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5232 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5233 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5234 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5235 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5236 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5237 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5238 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5239 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5240 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5241 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5242 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5243 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5244 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5245 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5246 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5247 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5248 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5249 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5250 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5251 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5252 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5253 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5254 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5255 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5256 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5257 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5258 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5259 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5260 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5261 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5262 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5263 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5264 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5265 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5266 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5267 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5268 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5269 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5270 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5271 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5272 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5273 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5274 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5275 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5276 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5277 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5278 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5279 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5280 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5281 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5282 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5283 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5284 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5285 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5286 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5287 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5288 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5289 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5290 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5291 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5292 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5293 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5294 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5295 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5296 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5297 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5298 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5299 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5300 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5301 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5302 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5303 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5304 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5305 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5306 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5307 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5308 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5309 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5310 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5311 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5312 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5313 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5314 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5315 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5316 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5317 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5318 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5319 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5320 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5321 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5322 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5323 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5324 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5325 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5326 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5327 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5328 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5329 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5330 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5331 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5332 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5333 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5334 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5335 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5336 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5337 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5338 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5339 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5340 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5341 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5342 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5343 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5344 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5345 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5346 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5347 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5348 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5349 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5350 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5351 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5352 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5353 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5354 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5355 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5356 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5357 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5358 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5359 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5360 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5361 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5362 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5363 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5364 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5365 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5366 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5367 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5368 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5369 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5370 0x00000000, 0x00000000, 0x00000000,
5371};
5372
5373static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5374 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5375 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5376 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5377 0x00000000, 0x00000000, 0x00000000,
5378};
5379
5380static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5381 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5382 0x00000000, 0x00000000, 0x00000000,
5383};
5384
5385/* tp->lock is held. */
5386static int tg3_load_tso_firmware(struct tg3 *tp)
5387{
5388 struct fw_info info;
5389 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5390 int err, i;
5391
5392 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5393 return 0;
5394
5395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5396 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5397 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5398 info.text_data = &tg3Tso5FwText[0];
5399 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5400 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5401 info.rodata_data = &tg3Tso5FwRodata[0];
5402 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5403 info.data_len = TG3_TSO5_FW_DATA_LEN;
5404 info.data_data = &tg3Tso5FwData[0];
5405 cpu_base = RX_CPU_BASE;
5406 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5407 cpu_scratch_size = (info.text_len +
5408 info.rodata_len +
5409 info.data_len +
5410 TG3_TSO5_FW_SBSS_LEN +
5411 TG3_TSO5_FW_BSS_LEN);
5412 } else {
5413 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5414 info.text_len = TG3_TSO_FW_TEXT_LEN;
5415 info.text_data = &tg3TsoFwText[0];
5416 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5417 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5418 info.rodata_data = &tg3TsoFwRodata[0];
5419 info.data_base = TG3_TSO_FW_DATA_ADDR;
5420 info.data_len = TG3_TSO_FW_DATA_LEN;
5421 info.data_data = &tg3TsoFwData[0];
5422 cpu_base = TX_CPU_BASE;
5423 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5424 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5425 }
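	/*
	 * Note: the 5705 path above runs the TSO code on the RX CPU and
	 * borrows the mbuf pool SRAM as its scratch area, while other
	 * TSO-capable chips use the TX CPU and its dedicated scratch space.
	 */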
5426
5427 err = tg3_load_firmware_cpu(tp, cpu_base,
5428 cpu_scratch_base, cpu_scratch_size,
5429 &info);
5430 if (err)
5431 return err;
5432
5433 /* Now startup the cpu. */
5434 tw32(cpu_base + CPU_STATE, 0xffffffff);
5435 tw32_f(cpu_base + CPU_PC, info.text_base);
5436
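	/*
	 * Verify that the new program counter value actually took; if it
	 * did not, re-halt the CPU, rewrite the PC and retry a few times
	 * before giving up.
	 */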
5437 for (i = 0; i < 5; i++) {
5438 if (tr32(cpu_base + CPU_PC) == info.text_base)
5439 break;
5440 tw32(cpu_base + CPU_STATE, 0xffffffff);
5441 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5442 tw32_f(cpu_base + CPU_PC, info.text_base);
5443 udelay(1000);
5444 }
5445 if (i >= 5) {
5446		printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set CPU PC "
5447		       "for %s, is %08x, should be %08x\n",
5448 tp->dev->name, tr32(cpu_base + CPU_PC),
5449 info.text_base);
5450 return -ENODEV;
5451 }
5452 tw32(cpu_base + CPU_STATE, 0xffffffff);
5453 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5454 return 0;
5455}
5456
5457#endif /* TG3_TSO_SUPPORT != 0 */
5458
5459/* tp->lock is held. */
5460static void __tg3_set_mac_addr(struct tg3 *tp)
5461{
5462 u32 addr_high, addr_low;
5463 int i;
5464
5465 addr_high = ((tp->dev->dev_addr[0] << 8) |
5466 tp->dev->dev_addr[1]);
5467 addr_low = ((tp->dev->dev_addr[2] << 24) |
5468 (tp->dev->dev_addr[3] << 16) |
5469 (tp->dev->dev_addr[4] << 8) |
5470 (tp->dev->dev_addr[5] << 0));
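	/*
	 * The hardware holds the station address as a 16-bit high half and
	 * a 32-bit low half; the same value is written into each of the
	 * four MAC_ADDR slots (and the twelve extended slots on 5703/5704).
	 */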
5471 for (i = 0; i < 4; i++) {
5472 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5473 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5474 }
5475
5476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5478 for (i = 0; i < 12; i++) {
5479 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5480 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5481 }
5482 }
5483
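	/* Seed the transmit backoff generator from the byte sum of the MAC address. */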
5484 addr_high = (tp->dev->dev_addr[0] +
5485 tp->dev->dev_addr[1] +
5486 tp->dev->dev_addr[2] +
5487 tp->dev->dev_addr[3] +
5488 tp->dev->dev_addr[4] +
5489 tp->dev->dev_addr[5]) &
5490 TX_BACKOFF_SEED_MASK;
5491 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5492}
5493
5494static int tg3_set_mac_addr(struct net_device *dev, void *p)
5495{
5496 struct tg3 *tp = netdev_priv(dev);
5497 struct sockaddr *addr = p;
5498
5499	if (!is_valid_ether_addr(addr->sa_data))
5500 return -EINVAL;
5501
5502	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5503
5504	spin_lock_bh(&tp->lock);
5505	__tg3_set_mac_addr(tp);
5506	spin_unlock_bh(&tp->lock);
5507
5508 return 0;
5509}
5510
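/*
 * A TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit host DMA
 * address (written as separate high/low words), a maxlen/flags word and,
 * on pre-5705 chips only, the ring's address in NIC-local memory.
 */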
5511/* tp->lock is held. */
5512static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5513 dma_addr_t mapping, u32 maxlen_flags,
5514 u32 nic_addr)
5515{
5516 tg3_write_mem(tp,
5517 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5518 ((u64) mapping >> 32));
5519 tg3_write_mem(tp,
5520 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5521 ((u64) mapping & 0xffffffff));
5522 tg3_write_mem(tp,
5523 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5524 maxlen_flags);
5525
5526 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5527 tg3_write_mem(tp,
5528 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5529 nic_addr);
5530}
5531
5532static void __tg3_set_rx_mode(struct net_device *);
5533static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5534{
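	/*
	 * The per-IRQ coalescing tick registers and the statistics block
	 * timer are only programmed on pre-5705 silicon; 5705-and-later
	 * parts skip those writes below.
	 */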
5535 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5536 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5537 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5538 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5539 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5540 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5541 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5542 }
5543 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5544 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5545 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5546 u32 val = ec->stats_block_coalesce_usecs;
5547
5548 if (!netif_carrier_ok(tp->dev))
5549 val = 0;
5550
5551 tw32(HOSTCC_STAT_COAL_TICKS, val);
5552 }
5553}
5554
5555/* tp->lock is held. */
5556static int tg3_reset_hw(struct tg3 *tp)
5557{
5558 u32 val, rdmac_mode;
5559 int i, err, limit;
5560
5561 tg3_disable_ints(tp);
5562
5563 tg3_stop_fw(tp);
5564
5565 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5566
5567 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5568		tg3_abort_hw(tp, 1);
5569	}
5570
5571 err = tg3_chip_reset(tp);
5572 if (err)
5573 return err;
5574
5575 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5576
5577 /* This works around an issue with Athlon chipsets on
5578 * B3 tigon3 silicon. This bit has no effect on any
5579 * other revision. But do not set this on PCI Express
5580 * chips.
5581 */
5582 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5583 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5584 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5585
5586 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5587 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5588 val = tr32(TG3PCI_PCISTATE);
5589 val |= PCISTATE_RETRY_SAME_DMA;
5590 tw32(TG3PCI_PCISTATE, val);
5591 }
5592
5593 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5594 /* Enable some hw fixes. */
5595 val = tr32(TG3PCI_MSI_DATA);
5596 val |= (1 << 26) | (1 << 28) | (1 << 29);
5597 tw32(TG3PCI_MSI_DATA, val);
5598 }
5599
5600 /* Descriptor ring init may make accesses to the
5601 * NIC SRAM area to setup the TX descriptors, so we
5602 * can only do this after the hardware has been
5603 * successfully reset.
5604 */
5605 tg3_init_rings(tp);
5606
5607 /* This value is determined during the probe time DMA
5608 * engine test, tg3_test_dma.
5609 */
5610 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5611
5612 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5613 GRC_MODE_4X_NIC_SEND_RINGS |
5614 GRC_MODE_NO_TX_PHDR_CSUM |
5615 GRC_MODE_NO_RX_PHDR_CSUM);
5616 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5617 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5618 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5619 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5620 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5621
5622 tw32(GRC_MODE,
5623 tp->grc_mode |
5624 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5625
5626	/* Setup the timer prescaler register. Clock is always 66 MHz. */
5627 val = tr32(GRC_MISC_CFG);
5628 val &= ~0xff;
5629 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5630 tw32(GRC_MISC_CFG, val);
5631
5632 /* Initialize MBUF/DESC pool. */
5633	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5634		/* Do nothing. */
5635 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5636 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5638 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5639 else
5640 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5641 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5642 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5643 }
5644#if TG3_TSO_SUPPORT != 0
5645 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5646 int fw_len;
5647
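		/*
		 * The 5705 TSO firmware lives in the mbuf pool SRAM (see
		 * tg3_load_tso_firmware), so round its footprint up to a
		 * 128-byte boundary and carve that much out of the pool.
		 */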
5648 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5649 TG3_TSO5_FW_RODATA_LEN +
5650 TG3_TSO5_FW_DATA_LEN +
5651 TG3_TSO5_FW_SBSS_LEN +
5652 TG3_TSO5_FW_BSS_LEN);
5653 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5654 tw32(BUFMGR_MB_POOL_ADDR,
5655 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5656 tw32(BUFMGR_MB_POOL_SIZE,
5657 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5658 }
5659#endif
5660
5661	if (tp->dev->mtu <= ETH_DATA_LEN) {
5662		tw32(BUFMGR_MB_RDMA_LOW_WATER,
5663 tp->bufmgr_config.mbuf_read_dma_low_water);
5664 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5665 tp->bufmgr_config.mbuf_mac_rx_low_water);
5666 tw32(BUFMGR_MB_HIGH_WATER,
5667 tp->bufmgr_config.mbuf_high_water);
5668 } else {
5669 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5670 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5671 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5672 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5673 tw32(BUFMGR_MB_HIGH_WATER,
5674 tp->bufmgr_config.mbuf_high_water_jumbo);
5675 }
5676 tw32(BUFMGR_DMA_LOW_WATER,
5677 tp->bufmgr_config.dma_low_water);
5678 tw32(BUFMGR_DMA_HIGH_WATER,
5679 tp->bufmgr_config.dma_high_water);
5680
5681 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5682 for (i = 0; i < 2000; i++) {
5683 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5684 break;
5685 udelay(10);
5686 }
5687 if (i >= 2000) {
5688 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5689 tp->dev->name);
5690 return -ENODEV;
5691 }
5692
5693 /* Setup replenish threshold. */
5694 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5695
5696 /* Initialize TG3_BDINFO's at:
5697 * RCVDBDI_STD_BD: standard eth size rx ring
5698 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5699 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5700 *
5701 * like so:
5702 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5703 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5704 * ring attribute flags
5705 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5706 *
5707 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5708 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5709 *
5710 * The size of each ring is fixed in the firmware, but the location is
5711 * configurable.
5712 */
5713 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5714 ((u64) tp->rx_std_mapping >> 32));
5715 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5716 ((u64) tp->rx_std_mapping & 0xffffffff));
5717 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5718 NIC_SRAM_RX_BUFFER_DESC);
5719
5720 /* Don't even try to program the JUMBO/MINI buffer descriptor
5721 * configs on 5705.
5722 */
5723 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5724 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5725 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5726 } else {
5727 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5728 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5729
5730 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5731 BDINFO_FLAGS_DISABLED);
5732
5733 /* Setup replenish threshold. */
5734 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5735
5736		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5737			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5738 ((u64) tp->rx_jumbo_mapping >> 32));
5739 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5740 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5741 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5742 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5743 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5744 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5745 } else {
5746 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5747 BDINFO_FLAGS_DISABLED);
5748 }
5749
5750 }
5751
5752 /* There is only one send ring on 5705/5750, no need to explicitly
5753 * disable the others.
5754 */
5755 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5756 /* Clear out send RCB ring in SRAM. */
5757 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5758 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5759 BDINFO_FLAGS_DISABLED);
5760 }
5761
5762 tp->tx_prod = 0;
5763 tp->tx_cons = 0;
5764 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5765 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5766
5767 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5768 tp->tx_desc_mapping,
5769 (TG3_TX_RING_SIZE <<
5770 BDINFO_FLAGS_MAXLEN_SHIFT),
5771 NIC_SRAM_TX_BUFFER_DESC);
5772
5773 /* There is only one receive return ring on 5705/5750, no need
5774 * to explicitly disable the others.
5775 */
5776 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5777 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5778 i += TG3_BDINFO_SIZE) {
5779 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5780 BDINFO_FLAGS_DISABLED);
5781 }
5782 }
5783
5784 tp->rx_rcb_ptr = 0;
5785 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5786
5787 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5788 tp->rx_rcb_mapping,
5789 (TG3_RX_RCB_RING_SIZE(tp) <<
5790 BDINFO_FLAGS_MAXLEN_SHIFT),
5791 0);
5792
5793 tp->rx_std_ptr = tp->rx_pending;
5794 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5795 tp->rx_std_ptr);
5796
5797	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5798			   tp->rx_jumbo_pending : 0;
5799 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5800 tp->rx_jumbo_ptr);
5801
5802 /* Initialize MAC address and backoff seed. */
5803 __tg3_set_mac_addr(tp);
5804
5805 /* MTU + ethernet header + FCS + optional VLAN tag */
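	/* i.e. mtu + 14 byte header + 4 byte FCS + 4 byte VLAN tag = mtu + ETH_HLEN + 8 */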
5806 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5807
5808 /* The slot time is changed by tg3_setup_phy if we
5809 * run at gigabit with half duplex.
5810 */
5811 tw32(MAC_TX_LENGTHS,
5812 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5813 (6 << TX_LENGTHS_IPG_SHIFT) |
5814 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5815
5816 /* Receive rules. */
5817 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5818 tw32(RCVLPC_CONFIG, 0x0181);
5819
5820 /* Calculate RDMAC_MODE setting early, we need it to determine
5821 * the RCVLPC_STATE_ENABLE mask.
5822 */
5823 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5824 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5825 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5826 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5827 RDMAC_MODE_LNGREAD_ENAB);
5828 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5829 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5830
5831 /* If statement applies to 5705 and 5750 PCI devices only */
5832 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5833 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5834 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5835		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5836 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5837 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5838 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5839 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5840 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5841 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5842 }
5843 }
5844
5845	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5846 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5847
5848#if TG3_TSO_SUPPORT != 0
5849 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5850 rdmac_mode |= (1 << 27);
5851#endif
5852
5853 /* Receive/send statistics. */
5854 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5855 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5856 val = tr32(RCVLPC_STATS_ENABLE);
5857 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5858 tw32(RCVLPC_STATS_ENABLE, val);
5859 } else {
5860 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5861 }
5862 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5863 tw32(SNDDATAI_STATSENAB, 0xffffff);
5864 tw32(SNDDATAI_STATSCTRL,
5865 (SNDDATAI_SCTRL_ENABLE |
5866 SNDDATAI_SCTRL_FASTUPD));
5867
5868 /* Setup host coalescing engine. */
5869 tw32(HOSTCC_MODE, 0);
5870 for (i = 0; i < 2000; i++) {
5871 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5872 break;
5873 udelay(10);
5874 }
5875
5876	__tg3_set_coalesce(tp, &tp->coal);
5877
5878 /* set status block DMA address */
5879 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5880 ((u64) tp->status_mapping >> 32));
5881 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5882 ((u64) tp->status_mapping & 0xffffffff));
5883
5884 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5885 /* Status/statistics block address. See tg3_timer,
5886 * the tg3_periodic_fetch_stats call there, and
5887 * tg3_get_stats to see how this works for 5705/5750 chips.
5888 */
5889		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5890 ((u64) tp->stats_mapping >> 32));
5891 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5892 ((u64) tp->stats_mapping & 0xffffffff));
5893 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5894 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5895 }
5896
5897 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5898
5899 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5900 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5901 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5902 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5903
5904 /* Clear statistics/status block in chip, and status block in ram. */
5905 for (i = NIC_SRAM_STATS_BLK;
5906 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5907 i += sizeof(u32)) {
5908 tg3_write_mem(tp, i, 0);
5909 udelay(40);
5910 }
5911 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5912
5913	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5914 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5915 /* reset to prevent losing 1st rx packet intermittently */
5916 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5917 udelay(10);
5918 }
5919
5920	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5921 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5922 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5923 udelay(40);
5924
5925	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5926 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5927 * register to preserve the GPIO settings for LOMs. The GPIOs,
5928 * whether used as inputs or outputs, are set by boot code after
5929 * reset.
5930 */
5931 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5932 u32 gpio_mask;
5933
5934 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5935 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5936
5937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5938 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5939 GRC_LCLCTRL_GPIO_OUTPUT3;
5940
5941		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5942
5943 /* GPIO1 must be driven high for eeprom write protect */
5944		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5945 GRC_LCLCTRL_GPIO_OUTPUT1);
5946	}
5947	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5948 udelay(100);
5949
5950	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5951	tp->last_tag = 0;
5952
5953 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5954 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5955 udelay(40);
5956 }
5957
5958 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5959 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5960 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5961 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5962 WDMAC_MODE_LNGREAD_ENAB);
5963
5964	/* If statement applies to 5705 and 5750 PCI devices only */
5965 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5966 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5968		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5969 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5970 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5971 /* nothing */
5972 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5973 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5974 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5975 val |= WDMAC_MODE_RX_ACCEL;
5976 }
5977 }
5978
5979 tw32_f(WDMAC_MODE, val);
5980 udelay(40);
5981
5982 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5983 val = tr32(TG3PCI_X_CAPS);
5984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5985 val &= ~PCIX_CAPS_BURST_MASK;
5986 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5987 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5988 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5989 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5990 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5991 val |= (tp->split_mode_max_reqs <<
5992 PCIX_CAPS_SPLIT_SHIFT);
5993 }
5994 tw32(TG3PCI_X_CAPS, val);
5995 }
5996
5997 tw32_f(RDMAC_MODE, rdmac_mode);
5998 udelay(40);
5999
6000 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6001 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6002 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6003 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6004 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6005 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6006 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6007 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6008#if TG3_TSO_SUPPORT != 0
6009 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6010 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6011#endif
6012 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6013 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6014
6015 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6016 err = tg3_load_5701_a0_firmware_fix(tp);
6017 if (err)
6018 return err;
6019 }
6020
6021#if TG3_TSO_SUPPORT != 0
6022 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6023 err = tg3_load_tso_firmware(tp);
6024 if (err)
6025 return err;
6026 }
6027#endif
6028
6029 tp->tx_mode = TX_MODE_ENABLE;
6030 tw32_f(MAC_TX_MODE, tp->tx_mode);
6031 udelay(100);
6032
6033 tp->rx_mode = RX_MODE_ENABLE;
6034 tw32_f(MAC_RX_MODE, tp->rx_mode);
6035 udelay(10);
6036
6037 if (tp->link_config.phy_is_low_power) {
6038 tp->link_config.phy_is_low_power = 0;
6039 tp->link_config.speed = tp->link_config.orig_speed;
6040 tp->link_config.duplex = tp->link_config.orig_duplex;
6041 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6042 }
6043
6044 tp->mi_mode = MAC_MI_MODE_BASE;
6045 tw32_f(MAC_MI_MODE, tp->mi_mode);
6046 udelay(80);
6047
6048 tw32(MAC_LED_CTRL, tp->led_ctrl);
6049
6050 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6051	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6052		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6053 udelay(10);
6054 }
6055 tw32_f(MAC_RX_MODE, tp->rx_mode);
6056 udelay(10);
6057
6058 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6059 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6060 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6061 /* Set drive transmission level to 1.2V */
6062 /* only if the signal pre-emphasis bit is not set */
6063 val = tr32(MAC_SERDES_CFG);
6064 val &= 0xfffff000;
6065 val |= 0x880;
6066 tw32(MAC_SERDES_CFG, val);
6067 }
6068 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6069 tw32(MAC_SERDES_CFG, 0x616000);
6070 }
6071
6072 /* Prevent chip from dropping frames when flow control
6073 * is enabled.
6074 */
6075 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6076
6077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6078 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6079 /* Use hardware link auto-negotiation */
6080 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6081 }
6082
6083 err = tg3_setup_phy(tp, 1);
6084 if (err)
6085 return err;
6086
6087 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6088 u32 tmp;
6089
6090 /* Clear CRC stats. */
6091 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6092 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6093 tg3_readphy(tp, 0x14, &tmp);
6094 }
6095 }
6096
6097 __tg3_set_rx_mode(tp->dev);
6098
6099 /* Initialize receive rules. */
6100 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6101 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6102 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6103 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6104
6105	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6106	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6107		limit = 8;
6108 else
6109 limit = 16;
6110 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6111 limit -= 4;
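	/*
	 * Clear every receive rule slot above the computed limit; each
	 * case below intentionally falls through to the next lower slot.
	 */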
6112 switch (limit) {
6113 case 16:
6114 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6115 case 15:
6116 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6117 case 14:
6118 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6119 case 13:
6120 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6121 case 12:
6122 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6123 case 11:
6124 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6125 case 10:
6126 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6127 case 9:
6128 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6129 case 8:
6130 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6131 case 7:
6132 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6133 case 6:
6134 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6135 case 5:
6136 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6137 case 4:
6138 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6139 case 3:
6140 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6141 case 2:
6142 case 1:
6143
6144 default:
6145 break;
6146 }
6147
6148 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6149
6150 return 0;
6151}
6152
6153/* Called at device open time to get the chip ready for
6154 * packet processing. Invoked with tp->lock held.
6155 */
6156static int tg3_init_hw(struct tg3 *tp)
6157{
6158 int err;
6159
6160 /* Force the chip into D0. */
6161 err = tg3_set_power_state(tp, 0);
6162 if (err)
6163 goto out;
6164
6165 tg3_switch_clocks(tp);
6166
6167 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6168
6169 err = tg3_reset_hw(tp);
6170
6171out:
6172 return err;
6173}
6174
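/* Fold a 32-bit statistics register into a 64-bit {high,low} counter,
 * carrying into the high word when the low word wraps around.
 */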
6175#define TG3_STAT_ADD32(PSTAT, REG) \
6176do { u32 __val = tr32(REG); \
6177 (PSTAT)->low += __val; \
6178 if ((PSTAT)->low < __val) \
6179 (PSTAT)->high += 1; \
6180} while (0)
6181
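/* Accumulate the MAC TX/RX statistics registers into the host statistics
 * block; called once per second from tg3_timer() on 5705-class and newer
 * chips, and only while the link is up.
 */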
6182static void tg3_periodic_fetch_stats(struct tg3 *tp)
6183{
6184 struct tg3_hw_stats *sp = tp->hw_stats;
6185
6186 if (!netif_carrier_ok(tp->dev))
6187 return;
6188
6189 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6190 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6191 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6192 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6193 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6194 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6195 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6196 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6197 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6198 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6199 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6200 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6201 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6202
6203 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6204 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6205 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6206 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6207 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6208 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6209 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6210 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6211 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6212 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6213 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6214 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6215 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6216 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6217}
6218
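/* Driver timer, re-armed every tp->timer_offset jiffies.  On chips without
 * tagged status it works around a status-block race by forcing an interrupt
 * or a coalescing-now cycle and schedules a reset if write DMA has stopped;
 * once per second it fetches statistics and checks for link changes, and it
 * sends the ASF heartbeat every 2 seconds when ASF is enabled.
 */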
6219static void tg3_timer(unsigned long __opaque)
6220{
6221 struct tg3 *tp = (struct tg3 *) __opaque;
6222
6223 spin_lock(&tp->lock);
6224
6225 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6226 /* All of this garbage is because, when using non-tagged
6227 * IRQ status, the mailbox/status_block protocol the chip
6228 * uses with the CPU is race prone.
6229 */
6230 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6231 tw32(GRC_LOCAL_CTRL,
6232 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6233 } else {
6234 tw32(HOSTCC_MODE, tp->coalesce_mode |
6235 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6236 }
6237
6238 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6239 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6240 spin_unlock(&tp->lock);
6241 schedule_work(&tp->reset_task);
6242 return;
6243 }
6244 }
6245
6246 /* This part only runs once per second. */
6247 if (!--tp->timer_counter) {
6248 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6249 tg3_periodic_fetch_stats(tp);
6250
6251 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6252 u32 mac_stat;
6253 int phy_event;
6254
6255 mac_stat = tr32(MAC_STATUS);
6256
6257 phy_event = 0;
6258 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6259 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6260 phy_event = 1;
6261 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6262 phy_event = 1;
6263
6264 if (phy_event)
6265 tg3_setup_phy(tp, 0);
6266 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6267 u32 mac_stat = tr32(MAC_STATUS);
6268 int need_setup = 0;
6269
6270 if (netif_carrier_ok(tp->dev) &&
6271 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6272 need_setup = 1;
6273 }
6274 if (! netif_carrier_ok(tp->dev) &&
6275 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6276 MAC_STATUS_SIGNAL_DET))) {
6277 need_setup = 1;
6278 }
6279 if (need_setup) {
6280 tw32_f(MAC_MODE,
6281 (tp->mac_mode &
6282 ~MAC_MODE_PORT_MODE_MASK));
6283 udelay(40);
6284 tw32_f(MAC_MODE, tp->mac_mode);
6285 udelay(40);
6286 tg3_setup_phy(tp, 0);
6287 }
6288 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6289 tg3_serdes_parallel_detect(tp);
6290
6291 tp->timer_counter = tp->timer_multiplier;
6292 }
6293
6294 /* Heartbeat is only sent once every 2 seconds. */
6295 if (!--tp->asf_counter) {
6296 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6297 u32 val;
6298
6299 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6300 FWCMD_NICDRV_ALIVE2);
6301 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6302 /* 5 seconds timeout */
6303 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6304 val = tr32(GRC_RX_CPU_EVENT);
6305 val |= (1 << 14);
6306 tw32(GRC_RX_CPU_EVENT, val);
6307 }
6308 tp->asf_counter = tp->asf_multiplier;
6309 }
6310
6311 spin_unlock(&tp->lock);
6312
6313 tp->timer.expires = jiffies + tp->timer_offset;
6314 add_timer(&tp->timer);
6315}
6316
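/* Check that the board can actually raise an interrupt: swap in
 * tg3_test_isr, force an immediate host-coalescing cycle, and poll the
 * interrupt mailbox for up to ~50 ms before restoring the normal handler.
 */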
6317static int tg3_test_interrupt(struct tg3 *tp)
6318{
6319 struct net_device *dev = tp->dev;
6320 int err, i;
6321 u32 int_mbox = 0;
6322
6323 if (!netif_running(dev))
6324 return -ENODEV;
6325
6326 tg3_disable_ints(tp);
6327
6328 free_irq(tp->pdev->irq, dev);
6329
6330 err = request_irq(tp->pdev->irq, tg3_test_isr,
6331 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6332 if (err)
6333 return err;
6334
6335 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6336 tg3_enable_ints(tp);
6337
6338 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6339 HOSTCC_MODE_NOW);
6340
6341 for (i = 0; i < 5; i++) {
6342 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6343 TG3_64BIT_REG_LOW);
6344 if (int_mbox != 0)
6345 break;
6346 msleep(10);
6347 }
6348
6349 tg3_disable_ints(tp);
6350
6351 free_irq(tp->pdev->irq, dev);
6352
6353 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6354 err = request_irq(tp->pdev->irq, tg3_msi,
6355 SA_SAMPLE_RANDOM, dev->name, dev);
6356 else {
6357 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6358 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6359 fn = tg3_interrupt_tagged;
6360 err = request_irq(tp->pdev->irq, fn,
6361 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6362 }
6363
6364 if (err)
6365 return err;
6366
6367 if (int_mbox != 0)
6368 return 0;
6369
6370 return -EIO;
6371}
6372
6373/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6374 * mode is successfully restored.
6375 */
6376static int tg3_test_msi(struct tg3 *tp)
6377{
6378 struct net_device *dev = tp->dev;
6379 int err;
6380 u16 pci_cmd;
6381
6382 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6383 return 0;
6384
6385 /* Turn off SERR reporting in case MSI terminates with Master
6386 * Abort.
6387 */
6388 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6389 pci_write_config_word(tp->pdev, PCI_COMMAND,
6390 pci_cmd & ~PCI_COMMAND_SERR);
6391
6392 err = tg3_test_interrupt(tp);
6393
6394 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6395
6396 if (!err)
6397 return 0;
6398
6399 /* other failures */
6400 if (err != -EIO)
6401 return err;
6402
6403 /* MSI test failed, go back to INTx mode */
6404 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6405 "switching to INTx mode. Please report this failure to "
6406 "the PCI maintainer and include system chipset information.\n",
6407 tp->dev->name);
6408
6409 free_irq(tp->pdev->irq, dev);
6410 pci_disable_msi(tp->pdev);
6411
6412 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6413
6414 {
6415 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6416 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6417 fn = tg3_interrupt_tagged;
6418
6419 err = request_irq(tp->pdev->irq, fn,
6420 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6421 }
6422 if (err)
6423 return err;
6424
6425 /* Need to reset the chip because the MSI cycle may have terminated
6426 * with Master Abort.
6427 */
6428 tg3_full_lock(tp, 1);
6429
6430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6431 err = tg3_init_hw(tp);
6432
6433 tg3_full_unlock(tp);
6434
6435 if (err)
6436 free_irq(tp->pdev->irq, dev);
6437
6438 return err;
6439}
6440
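/* net_device open(): allocate descriptor rings, request the MSI or INTx
 * interrupt, initialize the hardware, verify MSI delivery with
 * tg3_test_msi(), then start the driver timer and the transmit queue.
 */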
6441static int tg3_open(struct net_device *dev)
6442{
6443 struct tg3 *tp = netdev_priv(dev);
6444 int err;
6445
6446 tg3_full_lock(tp, 0);
6447
6448 tg3_disable_ints(tp);
6449 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6450
6451 tg3_full_unlock(tp);
6452
6453 /* The placement of this call is tied
6454 * to the setup and use of Host TX descriptors.
6455 */
6456 err = tg3_alloc_consistent(tp);
6457 if (err)
6458 return err;
6459
6460 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6461 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6462 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6463 /* All MSI supporting chips should support tagged
6464 * status. Assert that this is the case.
6465 */
6466 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6467 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6468 "Not using MSI.\n", tp->dev->name);
6469 } else if (pci_enable_msi(tp->pdev) == 0) {
6470 u32 msi_mode;
6471
6472 msi_mode = tr32(MSGINT_MODE);
6473 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6474 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6475 }
6476 }
6477 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6478 err = request_irq(tp->pdev->irq, tg3_msi,
6479 SA_SAMPLE_RANDOM, dev->name, dev);
6480 else {
6481 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6482 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6483 fn = tg3_interrupt_tagged;
6484
6485 err = request_irq(tp->pdev->irq, fn,
6486 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6487 }
6488
6489 if (err) {
6490 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6491 pci_disable_msi(tp->pdev);
6492 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6493 }
6494 tg3_free_consistent(tp);
6495 return err;
6496 }
6497
6498 tg3_full_lock(tp, 0);
6499
6500 err = tg3_init_hw(tp);
6501 if (err) {
6502 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6503 tg3_free_rings(tp);
6504 } else {
6505 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6506 tp->timer_offset = HZ;
6507 else
6508 tp->timer_offset = HZ / 10;
6509
6510 BUG_ON(tp->timer_offset > HZ);
6511 tp->timer_counter = tp->timer_multiplier =
6512 (HZ / tp->timer_offset);
6513 tp->asf_counter = tp->asf_multiplier =
6514 ((HZ / tp->timer_offset) * 2);
6515
6516 init_timer(&tp->timer);
6517 tp->timer.expires = jiffies + tp->timer_offset;
6518 tp->timer.data = (unsigned long) tp;
6519 tp->timer.function = tg3_timer;
6520 }
6521
6522 tg3_full_unlock(tp);
6523
6524 if (err) {
6525 free_irq(tp->pdev->irq, dev);
6526 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6527 pci_disable_msi(tp->pdev);
6528 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6529 }
6530 tg3_free_consistent(tp);
6531 return err;
6532 }
6533
6534 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6535 err = tg3_test_msi(tp);
6536
6537 if (err) {
6538 tg3_full_lock(tp, 0);
6539
6540 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6541 pci_disable_msi(tp->pdev);
6542 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6543 }
6544 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6545 tg3_free_rings(tp);
6546 tg3_free_consistent(tp);
6547
6548 tg3_full_unlock(tp);
6549
6550 return err;
6551 }
6552 }
6553
6554 tg3_full_lock(tp, 0);
6555
6556 add_timer(&tp->timer);
6557 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6558 tg3_enable_ints(tp);
6559
6560 tg3_full_unlock(tp);
6561
6562 netif_start_queue(dev);
6563
6564 return 0;
6565}
6566
6567#if 0
6568/*static*/ void tg3_dump_state(struct tg3 *tp)
6569{
6570 u32 val32, val32_2, val32_3, val32_4, val32_5;
6571 u16 val16;
6572 int i;
6573
6574 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6575 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6576 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6577 val16, val32);
6578
6579 /* MAC block */
6580 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6581 tr32(MAC_MODE), tr32(MAC_STATUS));
6582 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6583 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6584 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6585 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6586 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6587 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6588
6589 /* Send data initiator control block */
6590 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6591 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6592 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6593 tr32(SNDDATAI_STATSCTRL));
6594
6595 /* Send data completion control block */
6596 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6597
6598 /* Send BD ring selector block */
6599 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6600 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6601
6602 /* Send BD initiator control block */
6603 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6604 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6605
6606 /* Send BD completion control block */
6607 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6608
6609 /* Receive list placement control block */
6610 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6611 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6612 printk(" RCVLPC_STATSCTRL[%08x]\n",
6613 tr32(RCVLPC_STATSCTRL));
6614
6615 /* Receive data and receive BD initiator control block */
6616 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6617 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6618
6619 /* Receive data completion control block */
6620 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6621 tr32(RCVDCC_MODE));
6622
6623 /* Receive BD initiator control block */
6624 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6625 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6626
6627 /* Receive BD completion control block */
6628 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6629 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6630
6631 /* Receive list selector control block */
6632 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6633 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6634
6635 /* Mbuf cluster free block */
6636 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6637 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6638
6639 /* Host coalescing control block */
6640 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6641 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6642 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6643 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6644 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6645 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6646 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6647 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6648 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6649 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6650 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6651 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6652
6653 /* Memory arbiter control block */
6654 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6655 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6656
6657 /* Buffer manager control block */
6658 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6659 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6660 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6661 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6662 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6663 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6664 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6665 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6666
6667 /* Read DMA control block */
6668 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6669 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6670
6671 /* Write DMA control block */
6672 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6673 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6674
6675 /* DMA completion block */
6676 printk("DEBUG: DMAC_MODE[%08x]\n",
6677 tr32(DMAC_MODE));
6678
6679 /* GRC block */
6680 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6681 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6682 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6683 tr32(GRC_LOCAL_CTRL));
6684
6685 /* TG3_BDINFOs */
6686 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6687 tr32(RCVDBDI_JUMBO_BD + 0x0),
6688 tr32(RCVDBDI_JUMBO_BD + 0x4),
6689 tr32(RCVDBDI_JUMBO_BD + 0x8),
6690 tr32(RCVDBDI_JUMBO_BD + 0xc));
6691 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6692 tr32(RCVDBDI_STD_BD + 0x0),
6693 tr32(RCVDBDI_STD_BD + 0x4),
6694 tr32(RCVDBDI_STD_BD + 0x8),
6695 tr32(RCVDBDI_STD_BD + 0xc));
6696 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6697 tr32(RCVDBDI_MINI_BD + 0x0),
6698 tr32(RCVDBDI_MINI_BD + 0x4),
6699 tr32(RCVDBDI_MINI_BD + 0x8),
6700 tr32(RCVDBDI_MINI_BD + 0xc));
6701
6702 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6703 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6704 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6705 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6706 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6707 val32, val32_2, val32_3, val32_4);
6708
6709 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6710 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6711 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6712 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6713 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6714 val32, val32_2, val32_3, val32_4);
6715
6716 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6717 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6718 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6719 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6720 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6721 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6722 val32, val32_2, val32_3, val32_4, val32_5);
6723
6724 /* SW status block */
6725 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6726 tp->hw_status->status,
6727 tp->hw_status->status_tag,
6728 tp->hw_status->rx_jumbo_consumer,
6729 tp->hw_status->rx_consumer,
6730 tp->hw_status->rx_mini_consumer,
6731 tp->hw_status->idx[0].rx_producer,
6732 tp->hw_status->idx[0].tx_consumer);
6733
6734 /* SW statistics block */
6735 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6736 ((u32 *)tp->hw_stats)[0],
6737 ((u32 *)tp->hw_stats)[1],
6738 ((u32 *)tp->hw_stats)[2],
6739 ((u32 *)tp->hw_stats)[3]);
6740
6741 /* Mailboxes */
6742 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6743 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6744 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6745 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6746 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6747
6748 /* NIC side send descriptors. */
6749 for (i = 0; i < 6; i++) {
6750 unsigned long txd;
6751
6752 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6753 + (i * sizeof(struct tg3_tx_buffer_desc));
6754 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6755 i,
6756 readl(txd + 0x0), readl(txd + 0x4),
6757 readl(txd + 0x8), readl(txd + 0xc));
6758 }
6759
6760 /* NIC side RX descriptors. */
6761 for (i = 0; i < 6; i++) {
6762 unsigned long rxd;
6763
6764 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6765 + (i * sizeof(struct tg3_rx_buffer_desc));
6766 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6767 i,
6768 readl(rxd + 0x0), readl(rxd + 0x4),
6769 readl(rxd + 0x8), readl(rxd + 0xc));
6770 rxd += (4 * sizeof(u32));
6771 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6772 i,
6773 readl(rxd + 0x0), readl(rxd + 0x4),
6774 readl(rxd + 0x8), readl(rxd + 0xc));
6775 }
6776
6777 for (i = 0; i < 6; i++) {
6778 unsigned long rxd;
6779
6780 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6781 + (i * sizeof(struct tg3_rx_buffer_desc));
6782 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6783 i,
6784 readl(rxd + 0x0), readl(rxd + 0x4),
6785 readl(rxd + 0x8), readl(rxd + 0xc));
6786 rxd += (4 * sizeof(u32));
6787 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6788 i,
6789 readl(rxd + 0x0), readl(rxd + 0x4),
6790 readl(rxd + 0x8), readl(rxd + 0xc));
6791 }
6792}
6793#endif
6794
6795static struct net_device_stats *tg3_get_stats(struct net_device *);
6796static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6797
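/* net_device stop(): wait out any running reset task, stop the timer and
 * the transmit queue, halt the chip, release the interrupt, snapshot the
 * statistics for later reads, and free the descriptor rings.
 */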
6798static int tg3_close(struct net_device *dev)
6799{
6800 struct tg3 *tp = netdev_priv(dev);
6801
6802 /* Calling flush_scheduled_work() may deadlock because
6803 * linkwatch_event() may be on the workqueue and it will try to get
6804 * the rtnl_lock which we are holding.
6805 */
6806 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6807 msleep(1);
6808
6809 netif_stop_queue(dev);
6810
6811 del_timer_sync(&tp->timer);
6812
6813 tg3_full_lock(tp, 1);
6814#if 0
6815 tg3_dump_state(tp);
6816#endif
6817
6818 tg3_disable_ints(tp);
6819
6820 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6821 tg3_free_rings(tp);
6822 tp->tg3_flags &=
6823 ~(TG3_FLAG_INIT_COMPLETE |
6824 TG3_FLAG_GOT_SERDES_FLOWCTL);
6825 netif_carrier_off(tp->dev);
6826
6827 tg3_full_unlock(tp);
6828
6829 free_irq(tp->pdev->irq, dev);
6830 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6831 pci_disable_msi(tp->pdev);
6832 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6833 }
6834
6835 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6836 sizeof(tp->net_stats_prev));
6837 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6838 sizeof(tp->estats_prev));
6839
6840 tg3_free_consistent(tp);
6841
6842 return 0;
6843}
6844
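/* Return a 64-bit {high,low} hardware counter as an unsigned long;
 * 32-bit hosts see only the low word.
 */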
6845static inline unsigned long get_stat64(tg3_stat64_t *val)
6846{
6847 unsigned long ret;
6848
6849#if (BITS_PER_LONG == 32)
6850 ret = val->low;
6851#else
6852 ret = ((u64)val->high << 32) | ((u64)val->low);
6853#endif
6854 return ret;
6855}
6856
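/* On 5700/5701 copper devices the CRC error count is read from the PHY
 * (register 0x1e with bit 15 set, then register 0x14) and accumulated in
 * software; other devices use the MAC rx_fcs_errors counter.
 */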
6857static unsigned long calc_crc_errors(struct tg3 *tp)
6858{
6859 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6860
6861 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6862 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6864 u32 val;
6865
6866 spin_lock_bh(&tp->lock);
6867 if (!tg3_readphy(tp, 0x1e, &val)) {
6868 tg3_writephy(tp, 0x1e, val | 0x8000);
6869 tg3_readphy(tp, 0x14, &val);
6870 } else
6871 val = 0;
6872 spin_unlock_bh(&tp->lock);
6873
6874 tp->phy_crc_errors += val;
6875
6876 return tp->phy_crc_errors;
6877 }
6878
6879 return get_stat64(&hw_stats->rx_fcs_errors);
6880}
6881
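/* Ethtool statistics are running totals: the value saved at the last close
 * (estats_prev) plus the counters currently in the hardware statistics block.
 */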
6882#define ESTAT_ADD(member) \
6883 estats->member = old_estats->member + \
6884 get_stat64(&hw_stats->member)
6885
6886static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6887{
6888 struct tg3_ethtool_stats *estats = &tp->estats;
6889 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6890 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6891
6892 if (!hw_stats)
6893 return old_estats;
6894
6895 ESTAT_ADD(rx_octets);
6896 ESTAT_ADD(rx_fragments);
6897 ESTAT_ADD(rx_ucast_packets);
6898 ESTAT_ADD(rx_mcast_packets);
6899 ESTAT_ADD(rx_bcast_packets);
6900 ESTAT_ADD(rx_fcs_errors);
6901 ESTAT_ADD(rx_align_errors);
6902 ESTAT_ADD(rx_xon_pause_rcvd);
6903 ESTAT_ADD(rx_xoff_pause_rcvd);
6904 ESTAT_ADD(rx_mac_ctrl_rcvd);
6905 ESTAT_ADD(rx_xoff_entered);
6906 ESTAT_ADD(rx_frame_too_long_errors);
6907 ESTAT_ADD(rx_jabbers);
6908 ESTAT_ADD(rx_undersize_packets);
6909 ESTAT_ADD(rx_in_length_errors);
6910 ESTAT_ADD(rx_out_length_errors);
6911 ESTAT_ADD(rx_64_or_less_octet_packets);
6912 ESTAT_ADD(rx_65_to_127_octet_packets);
6913 ESTAT_ADD(rx_128_to_255_octet_packets);
6914 ESTAT_ADD(rx_256_to_511_octet_packets);
6915 ESTAT_ADD(rx_512_to_1023_octet_packets);
6916 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6917 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6918 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6919 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6920 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6921
6922 ESTAT_ADD(tx_octets);
6923 ESTAT_ADD(tx_collisions);
6924 ESTAT_ADD(tx_xon_sent);
6925 ESTAT_ADD(tx_xoff_sent);
6926 ESTAT_ADD(tx_flow_control);
6927 ESTAT_ADD(tx_mac_errors);
6928 ESTAT_ADD(tx_single_collisions);
6929 ESTAT_ADD(tx_mult_collisions);
6930 ESTAT_ADD(tx_deferred);
6931 ESTAT_ADD(tx_excessive_collisions);
6932 ESTAT_ADD(tx_late_collisions);
6933 ESTAT_ADD(tx_collide_2times);
6934 ESTAT_ADD(tx_collide_3times);
6935 ESTAT_ADD(tx_collide_4times);
6936 ESTAT_ADD(tx_collide_5times);
6937 ESTAT_ADD(tx_collide_6times);
6938 ESTAT_ADD(tx_collide_7times);
6939 ESTAT_ADD(tx_collide_8times);
6940 ESTAT_ADD(tx_collide_9times);
6941 ESTAT_ADD(tx_collide_10times);
6942 ESTAT_ADD(tx_collide_11times);
6943 ESTAT_ADD(tx_collide_12times);
6944 ESTAT_ADD(tx_collide_13times);
6945 ESTAT_ADD(tx_collide_14times);
6946 ESTAT_ADD(tx_collide_15times);
6947 ESTAT_ADD(tx_ucast_packets);
6948 ESTAT_ADD(tx_mcast_packets);
6949 ESTAT_ADD(tx_bcast_packets);
6950 ESTAT_ADD(tx_carrier_sense_errors);
6951 ESTAT_ADD(tx_discards);
6952 ESTAT_ADD(tx_errors);
6953
6954 ESTAT_ADD(dma_writeq_full);
6955 ESTAT_ADD(dma_write_prioq_full);
6956 ESTAT_ADD(rxbds_empty);
6957 ESTAT_ADD(rx_discards);
6958 ESTAT_ADD(rx_errors);
6959 ESTAT_ADD(rx_threshold_hit);
6960
6961 ESTAT_ADD(dma_readq_full);
6962 ESTAT_ADD(dma_read_prioq_full);
6963 ESTAT_ADD(tx_comp_queue_full);
6964
6965 ESTAT_ADD(ring_set_send_prod_index);
6966 ESTAT_ADD(ring_status_update);
6967 ESTAT_ADD(nic_irqs);
6968 ESTAT_ADD(nic_avoided_irqs);
6969 ESTAT_ADD(nic_tx_threshold_hit);
6970
6971 return estats;
6972}
6973
6974static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6975{
6976 struct tg3 *tp = netdev_priv(dev);
6977 struct net_device_stats *stats = &tp->net_stats;
6978 struct net_device_stats *old_stats = &tp->net_stats_prev;
6979 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6980
6981 if (!hw_stats)
6982 return old_stats;
6983
6984 stats->rx_packets = old_stats->rx_packets +
6985 get_stat64(&hw_stats->rx_ucast_packets) +
6986 get_stat64(&hw_stats->rx_mcast_packets) +
6987 get_stat64(&hw_stats->rx_bcast_packets);
6988
6989 stats->tx_packets = old_stats->tx_packets +
6990 get_stat64(&hw_stats->tx_ucast_packets) +
6991 get_stat64(&hw_stats->tx_mcast_packets) +
6992 get_stat64(&hw_stats->tx_bcast_packets);
6993
6994 stats->rx_bytes = old_stats->rx_bytes +
6995 get_stat64(&hw_stats->rx_octets);
6996 stats->tx_bytes = old_stats->tx_bytes +
6997 get_stat64(&hw_stats->tx_octets);
6998
6999 stats->rx_errors = old_stats->rx_errors +
7000 get_stat64(&hw_stats->rx_errors);
7001 stats->tx_errors = old_stats->tx_errors +
7002 get_stat64(&hw_stats->tx_errors) +
7003 get_stat64(&hw_stats->tx_mac_errors) +
7004 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7005 get_stat64(&hw_stats->tx_discards);
7006
7007 stats->multicast = old_stats->multicast +
7008 get_stat64(&hw_stats->rx_mcast_packets);
7009 stats->collisions = old_stats->collisions +
7010 get_stat64(&hw_stats->tx_collisions);
7011
7012 stats->rx_length_errors = old_stats->rx_length_errors +
7013 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7014 get_stat64(&hw_stats->rx_undersize_packets);
7015
7016 stats->rx_over_errors = old_stats->rx_over_errors +
7017 get_stat64(&hw_stats->rxbds_empty);
7018 stats->rx_frame_errors = old_stats->rx_frame_errors +
7019 get_stat64(&hw_stats->rx_align_errors);
7020 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7021 get_stat64(&hw_stats->tx_discards);
7022 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7023 get_stat64(&hw_stats->tx_carrier_sense_errors);
7024
7025 stats->rx_crc_errors = old_stats->rx_crc_errors +
7026 calc_crc_errors(tp);
7027
7028 stats->rx_missed_errors = old_stats->rx_missed_errors +
7029 get_stat64(&hw_stats->rx_discards);
7030
7031 return stats;
7032}
7033
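/* Bit-serial CRC-32 using the reflected Ethernet polynomial (0xedb88320);
 * used below to hash multicast addresses into the MAC hash registers.
 */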
7034static inline u32 calc_crc(unsigned char *buf, int len)
7035{
7036 u32 reg;
7037 u32 tmp;
7038 int j, k;
7039
7040 reg = 0xffffffff;
7041
7042 for (j = 0; j < len; j++) {
7043 reg ^= buf[j];
7044
7045 for (k = 0; k < 8; k++) {
7046 tmp = reg & 0x01;
7047
7048 reg >>= 1;
7049
7050 if (tmp) {
7051 reg ^= 0xedb88320;
7052 }
7053 }
7054 }
7055
7056 return ~reg;
7057}
7058
7059static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7060{
7061 /* accept or reject all multicast frames */
7062 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7063 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7064 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7065 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7066}
7067
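/* Program the RX filtering state (promiscuous, all-multicast, or the
 * 128-bit multicast hash) and VLAN tag stripping into the MAC; the caller
 * is expected to hold tp->lock.
 */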
7068static void __tg3_set_rx_mode(struct net_device *dev)
7069{
7070 struct tg3 *tp = netdev_priv(dev);
7071 u32 rx_mode;
7072
7073 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7074 RX_MODE_KEEP_VLAN_TAG);
7075
7076 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7077 * flag clear.
7078 */
7079#if TG3_VLAN_TAG_USED
7080 if (!tp->vlgrp &&
7081 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7082 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7083#else
7084 /* By definition, VLAN is always disabled in this
7085 * case.
7086 */
7087 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7088 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7089#endif
7090
7091 if (dev->flags & IFF_PROMISC) {
7092 /* Promiscuous mode. */
7093 rx_mode |= RX_MODE_PROMISC;
7094 } else if (dev->flags & IFF_ALLMULTI) {
7095 /* Accept all multicast. */
7096 tg3_set_multi (tp, 1);
7097 } else if (dev->mc_count < 1) {
7098 /* Reject all multicast. */
7099 tg3_set_multi (tp, 0);
7100 } else {
7101 /* Accept one or more multicast(s). */
7102 struct dev_mc_list *mclist;
7103 unsigned int i;
7104 u32 mc_filter[4] = { 0, };
7105 u32 regidx;
7106 u32 bit;
7107 u32 crc;
7108
7109 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7110 i++, mclist = mclist->next) {
7111
7112 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7113 bit = ~crc & 0x7f;
7114 regidx = (bit & 0x60) >> 5;
7115 bit &= 0x1f;
7116 mc_filter[regidx] |= (1 << bit);
7117 }
7118
7119 tw32(MAC_HASH_REG_0, mc_filter[0]);
7120 tw32(MAC_HASH_REG_1, mc_filter[1]);
7121 tw32(MAC_HASH_REG_2, mc_filter[2]);
7122 tw32(MAC_HASH_REG_3, mc_filter[3]);
7123 }
7124
7125 if (rx_mode != tp->rx_mode) {
7126 tp->rx_mode = rx_mode;
7127 tw32_f(MAC_RX_MODE, rx_mode);
7128 udelay(10);
7129 }
7130}
7131
7132static void tg3_set_rx_mode(struct net_device *dev)
7133{
7134 struct tg3 *tp = netdev_priv(dev);
7135
7136 tg3_full_lock(tp, 0);
7137 __tg3_set_rx_mode(dev);
7138 tg3_full_unlock(tp);
7139}
7140
7141#define TG3_REGDUMP_LEN (32 * 1024)
7142
7143static int tg3_get_regs_len(struct net_device *dev)
7144{
7145 return TG3_REGDUMP_LEN;
7146}
7147
7148static void tg3_get_regs(struct net_device *dev,
7149 struct ethtool_regs *regs, void *_p)
7150{
7151 u32 *p = _p;
7152 struct tg3 *tp = netdev_priv(dev);
7153 u8 *orig_p = _p;
7154 int i;
7155
7156 regs->version = 0;
7157
7158 memset(p, 0, TG3_REGDUMP_LEN);
7159
7160 tg3_full_lock(tp, 0);
7161
7162#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7163#define GET_REG32_LOOP(base,len) \
7164do { p = (u32 *)(orig_p + (base)); \
7165 for (i = 0; i < len; i += 4) \
7166 __GET_REG32((base) + i); \
7167} while (0)
7168#define GET_REG32_1(reg) \
7169do { p = (u32 *)(orig_p + (reg)); \
7170 __GET_REG32((reg)); \
7171} while (0)
7172
7173 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7174 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7175 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7176 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7177 GET_REG32_1(SNDDATAC_MODE);
7178 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7179 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7180 GET_REG32_1(SNDBDC_MODE);
7181 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7182 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7183 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7184 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7185 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7186 GET_REG32_1(RCVDCC_MODE);
7187 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7188 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7189 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7190 GET_REG32_1(MBFREE_MODE);
7191 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7192 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7193 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7194 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7195 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7196 GET_REG32_1(RX_CPU_MODE);
7197 GET_REG32_1(RX_CPU_STATE);
7198 GET_REG32_1(RX_CPU_PGMCTR);
7199 GET_REG32_1(RX_CPU_HWBKPT);
7200 GET_REG32_1(TX_CPU_MODE);
7201 GET_REG32_1(TX_CPU_STATE);
7202 GET_REG32_1(TX_CPU_PGMCTR);
7203 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7204 GET_REG32_LOOP(FTQ_RESET, 0x120);
7205 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7206 GET_REG32_1(DMAC_MODE);
7207 GET_REG32_LOOP(GRC_MODE, 0x4c);
7208 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7209 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7210
7211#undef __GET_REG32
7212#undef GET_REG32_LOOP
7213#undef GET_REG32_1
7214
7215 tg3_full_unlock(tp);
7216}
7217
7218static int tg3_get_eeprom_len(struct net_device *dev)
7219{
7220 struct tg3 *tp = netdev_priv(dev);
7221
7222 return tp->nvram_size;
7223}
7224
7225static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7226
7227static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7228{
7229 struct tg3 *tp = netdev_priv(dev);
7230 int ret;
7231 u8 *pd;
7232 u32 i, offset, len, val, b_offset, b_count;
7233
7234 offset = eeprom->offset;
7235 len = eeprom->len;
7236 eeprom->len = 0;
7237
7238 eeprom->magic = TG3_EEPROM_MAGIC;
7239
7240 if (offset & 3) {
7241 /* adjustments to start on required 4 byte boundary */
7242 b_offset = offset & 3;
7243 b_count = 4 - b_offset;
7244 if (b_count > len) {
7245 /* i.e. offset=1 len=2 */
7246 b_count = len;
7247 }
7248 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7249 if (ret)
7250 return ret;
7251 val = cpu_to_le32(val);
7252 memcpy(data, ((char*)&val) + b_offset, b_count);
7253 len -= b_count;
7254 offset += b_count;
7255 eeprom->len += b_count;
7256 }
7257
7258 /* read bytes up to the last 4-byte boundary */
7259 pd = &data[eeprom->len];
7260 for (i = 0; i < (len - (len & 3)); i += 4) {
7261 ret = tg3_nvram_read(tp, offset + i, &val);
7262 if (ret) {
7263 eeprom->len += i;
7264 return ret;
7265 }
7266 val = cpu_to_le32(val);
7267 memcpy(pd + i, &val, 4);
7268 }
7269 eeprom->len += i;
7270
7271 if (len & 3) {
7272 /* read last bytes not ending on 4 byte boundary */
7273 pd = &data[eeprom->len];
7274 b_count = len & 3;
7275 b_offset = offset + len - b_count;
7276 ret = tg3_nvram_read(tp, b_offset, &val);
7277 if (ret)
7278 return ret;
7279 val = cpu_to_le32(val);
7280 memcpy(pd, ((char*)&val), b_count);
7281 eeprom->len += b_count;
7282 }
7283 return 0;
7284}
7285
7286static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7287
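/* ethtool EEPROM write: NVRAM is accessed 32 bits at a time, so a misaligned
 * start or length is handled by read-modify-write of the neighbouring words
 * before calling tg3_nvram_write_block().
 */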
7288static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7289{
7290 struct tg3 *tp = netdev_priv(dev);
7291 int ret;
7292 u32 offset, len, b_offset, odd_len, start, end;
7293 u8 *buf;
7294
7295 if (eeprom->magic != TG3_EEPROM_MAGIC)
7296 return -EINVAL;
7297
7298 offset = eeprom->offset;
7299 len = eeprom->len;
7300
7301 if ((b_offset = (offset & 3))) {
7302 /* adjustments to start on required 4 byte boundary */
7303 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7304 if (ret)
7305 return ret;
7306 start = cpu_to_le32(start);
7307 len += b_offset;
7308 offset &= ~3;
7309 if (len < 4)
7310 len = 4;
7311 }
7312
7313 odd_len = 0;
7314 if (len & 3) {
7315 /* adjustments to end on required 4 byte boundary */
7316 odd_len = 1;
7317 len = (len + 3) & ~3;
7318 ret = tg3_nvram_read(tp, offset+len-4, &end);
7319 if (ret)
7320 return ret;
7321 end = cpu_to_le32(end);
7322 }
7323
7324 buf = data;
7325 if (b_offset || odd_len) {
7326 buf = kmalloc(len, GFP_KERNEL);
7327 if (!buf)
7328 return -ENOMEM;
7329 if (b_offset)
7330 memcpy(buf, &start, 4);
7331 if (odd_len)
7332 memcpy(buf+len-4, &end, 4);
7333 memcpy(buf + b_offset, data, eeprom->len);
7334 }
7335
7336 ret = tg3_nvram_write_block(tp, offset, len, buf);
7337
7338 if (buf != data)
7339 kfree(buf);
7340
7341 return ret;
7342}
7343
7344static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7345{
7346 struct tg3 *tp = netdev_priv(dev);
7347
7348 cmd->supported = (SUPPORTED_Autoneg);
7349
7350 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7351 cmd->supported |= (SUPPORTED_1000baseT_Half |
7352 SUPPORTED_1000baseT_Full);
7353
7354 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7355 cmd->supported |= (SUPPORTED_100baseT_Half |
7356 SUPPORTED_100baseT_Full |
7357 SUPPORTED_10baseT_Half |
7358 SUPPORTED_10baseT_Full |
7359 SUPPORTED_MII);
7360 else
7361 cmd->supported |= SUPPORTED_FIBRE;
7362
7363 cmd->advertising = tp->link_config.advertising;
7364 if (netif_running(dev)) {
7365 cmd->speed = tp->link_config.active_speed;
7366 cmd->duplex = tp->link_config.active_duplex;
7367 }
7368 cmd->port = 0;
7369 cmd->phy_address = PHY_ADDR;
7370 cmd->transceiver = 0;
7371 cmd->autoneg = tp->link_config.autoneg;
7372 cmd->maxtxpkt = 0;
7373 cmd->maxrxpkt = 0;
7374 return 0;
7375}
7376
7377static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7378{
7379 struct tg3 *tp = netdev_priv(dev);
7380
7381 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7382 /* These are the only valid advertisement bits allowed. */
7383 if (cmd->autoneg == AUTONEG_ENABLE &&
7384 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7385 ADVERTISED_1000baseT_Full |
7386 ADVERTISED_Autoneg |
7387 ADVERTISED_FIBRE)))
7388 return -EINVAL;
7389 /* Fiber can only do SPEED_1000. */
7390 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7391 (cmd->speed != SPEED_1000))
7392 return -EINVAL;
7393 /* Copper cannot force SPEED_1000. */
7394 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7395 (cmd->speed == SPEED_1000))
7396 return -EINVAL;
7397 else if ((cmd->speed == SPEED_1000) &&
7398 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7399 return -EINVAL;
7400
7401 tg3_full_lock(tp, 0);
7402
7403 tp->link_config.autoneg = cmd->autoneg;
7404 if (cmd->autoneg == AUTONEG_ENABLE) {
7405 tp->link_config.advertising = cmd->advertising;
7406 tp->link_config.speed = SPEED_INVALID;
7407 tp->link_config.duplex = DUPLEX_INVALID;
7408 } else {
7409 tp->link_config.advertising = 0;
7410 tp->link_config.speed = cmd->speed;
7411 tp->link_config.duplex = cmd->duplex;
7412 }
7413
7414 if (netif_running(dev))
7415 tg3_setup_phy(tp, 1);
7416
7417 tg3_full_unlock(tp);
7418
7419 return 0;
7420}
7421
7422static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7423{
7424 struct tg3 *tp = netdev_priv(dev);
7425
7426 strcpy(info->driver, DRV_MODULE_NAME);
7427 strcpy(info->version, DRV_MODULE_VERSION);
7428 strcpy(info->bus_info, pci_name(tp->pdev));
7429}
7430
7431static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7432{
7433 struct tg3 *tp = netdev_priv(dev);
7434
7435 wol->supported = WAKE_MAGIC;
7436 wol->wolopts = 0;
7437 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7438 wol->wolopts = WAKE_MAGIC;
7439 memset(&wol->sopass, 0, sizeof(wol->sopass));
7440}
7441
7442static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7443{
7444 struct tg3 *tp = netdev_priv(dev);
7445
7446 if (wol->wolopts & ~WAKE_MAGIC)
7447 return -EINVAL;
7448 if ((wol->wolopts & WAKE_MAGIC) &&
7449 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7450 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7451 return -EINVAL;
7452
7453 spin_lock_bh(&tp->lock);
7454 if (wol->wolopts & WAKE_MAGIC)
7455 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7456 else
7457 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7458 spin_unlock_bh(&tp->lock);
7459
7460 return 0;
7461}
7462
7463static u32 tg3_get_msglevel(struct net_device *dev)
7464{
7465 struct tg3 *tp = netdev_priv(dev);
7466 return tp->msg_enable;
7467}
7468
7469static void tg3_set_msglevel(struct net_device *dev, u32 value)
7470{
7471 struct tg3 *tp = netdev_priv(dev);
7472 tp->msg_enable = value;
7473}
7474
7475#if TG3_TSO_SUPPORT != 0
7476static int tg3_set_tso(struct net_device *dev, u32 value)
7477{
7478 struct tg3 *tp = netdev_priv(dev);
7479
7480 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7481 if (value)
7482 return -EINVAL;
7483 return 0;
7484 }
7485 return ethtool_op_set_tso(dev, value);
7486}
7487#endif
7488
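/* ethtool -r: restart autonegotiation on copper devices by setting
 * BMCR_ANRESTART; rejected for SerDes PHYs or when autoneg is not enabled
 * (unless parallel detect is active).
 */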
7489static int tg3_nway_reset(struct net_device *dev)
7490{
7491 struct tg3 *tp = netdev_priv(dev);
7492 u32 bmcr;
7493 int r;
7494
7495 if (!netif_running(dev))
7496 return -EAGAIN;
7497
7498 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7499 return -EINVAL;
7500
7501 spin_lock_bh(&tp->lock);
7502 r = -EINVAL;
7503 tg3_readphy(tp, MII_BMCR, &bmcr);
7504 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7505 ((bmcr & BMCR_ANENABLE) ||
7506 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7507 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7508 BMCR_ANENABLE);
7509 r = 0;
7510 }
7511 spin_unlock_bh(&tp->lock);
7512
7513 return r;
7514}
7515
7516static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7517{
7518 struct tg3 *tp = netdev_priv(dev);
7519
7520 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7521 ering->rx_mini_max_pending = 0;
7522 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7523
7524 ering->rx_pending = tp->rx_pending;
7525 ering->rx_mini_pending = 0;
7526 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7527 ering->tx_pending = tp->tx_pending;
7528}
7529
7530static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7531{
7532 struct tg3 *tp = netdev_priv(dev);
7533 int irq_sync = 0;
7534
7535 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7536 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7537 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7538 return -EINVAL;
7539
7540 if (netif_running(dev)) {
7541 tg3_netif_stop(tp);
7542 irq_sync = 1;
7543 }
7544
7545 tg3_full_lock(tp, irq_sync);
7546
7547 tp->rx_pending = ering->rx_pending;
7548
7549 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7550 tp->rx_pending > 63)
7551 tp->rx_pending = 63;
7552 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7553 tp->tx_pending = ering->tx_pending;
7554
7555 if (netif_running(dev)) {
7556 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7557 tg3_init_hw(tp);
7558 tg3_netif_start(tp);
7559 }
7560
7561 tg3_full_unlock(tp);
7562
7563 return 0;
7564}
7565
7566static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7567{
7568 struct tg3 *tp = netdev_priv(dev);
7569
7570 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7571 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7572 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7573}
7574
7575static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7576{
7577 struct tg3 *tp = netdev_priv(dev);
7578 int irq_sync = 0;
7579
7580 if (netif_running(dev)) {
7581 tg3_netif_stop(tp);
7582 irq_sync = 1;
7583 }
7584
7585 tg3_full_lock(tp, irq_sync);
7586
7587 if (epause->autoneg)
7588 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7589 else
7590 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7591 if (epause->rx_pause)
7592 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7593 else
7594 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7595 if (epause->tx_pause)
7596 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7597 else
7598 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7599
7600 if (netif_running(dev)) {
7601 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7602 tg3_init_hw(tp);
7603 tg3_netif_start(tp);
7604 }
7605
7606 tg3_full_unlock(tp);
7607
7608 return 0;
7609}
7610
7611static u32 tg3_get_rx_csum(struct net_device *dev)
7612{
7613 struct tg3 *tp = netdev_priv(dev);
7614 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7615}
7616
7617static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7618{
7619 struct tg3 *tp = netdev_priv(dev);
7620
7621 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7622 if (data != 0)
7623 return -EINVAL;
7624 return 0;
7625 }
7626
7627 spin_lock_bh(&tp->lock);
7628 if (data)
7629 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7630 else
7631 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7632 spin_unlock_bh(&tp->lock);
7633
7634 return 0;
7635}
7636
7637static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7638{
7639 struct tg3 *tp = netdev_priv(dev);
7640
7641 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7642 if (data != 0)
7643 return -EINVAL;
7644 return 0;
7645 }
7646
7647 if (data)
7648 dev->features |= NETIF_F_IP_CSUM;
7649 else
7650 dev->features &= ~NETIF_F_IP_CSUM;
7651
7652 return 0;
7653}
7654
7655static int tg3_get_stats_count (struct net_device *dev)
7656{
7657 return TG3_NUM_STATS;
7658}
7659
7660static int tg3_get_test_count (struct net_device *dev)
7661{
7662 return TG3_NUM_TEST;
7663}
7664
7665static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7666{
7667 switch (stringset) {
7668 case ETH_SS_STATS:
7669 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7670 break;
7671 case ETH_SS_TEST:
7672 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7673 break;
7674 default:
7675 WARN_ON(1); /* we need a WARN() */
7676 break;
7677 }
7678}
7679
7680static int tg3_phys_id(struct net_device *dev, u32 data)
7681{
7682 struct tg3 *tp = netdev_priv(dev);
7683 int i;
7684
7685 if (!netif_running(tp->dev))
7686 return -EAGAIN;
7687
7688 if (data == 0)
7689 data = 2;
7690
7691 for (i = 0; i < (data * 2); i++) {
7692 if ((i % 2) == 0)
7693 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7694 LED_CTRL_1000MBPS_ON |
7695 LED_CTRL_100MBPS_ON |
7696 LED_CTRL_10MBPS_ON |
7697 LED_CTRL_TRAFFIC_OVERRIDE |
7698 LED_CTRL_TRAFFIC_BLINK |
7699 LED_CTRL_TRAFFIC_LED);
7700
7701 else
7702 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7703 LED_CTRL_TRAFFIC_OVERRIDE);
7704
7705 if (msleep_interruptible(500))
7706 break;
7707 }
7708 tw32(MAC_LED_CTRL, tp->led_ctrl);
7709 return 0;
7710}
7711
7712static void tg3_get_ethtool_stats (struct net_device *dev,
7713 struct ethtool_stats *estats, u64 *tmp_stats)
7714{
7715 struct tg3 *tp = netdev_priv(dev);
7716 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7717}
7718
7719#define NVRAM_TEST_SIZE 0x100
7720
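/* ethtool self-test: read the first 256 bytes of NVRAM and verify the EEPROM
 * magic value plus the bootstrap and manufacturing-block checksums.
 */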
7721static int tg3_test_nvram(struct tg3 *tp)
7722{
7723 u32 *buf, csum;
7724 int i, j, err = 0;
7725
7726 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7727 if (buf == NULL)
7728 return -ENOMEM;
7729
7730 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7731 u32 val;
7732
7733 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7734 break;
7735 buf[j] = cpu_to_le32(val);
7736 }
7737 if (i < NVRAM_TEST_SIZE)
7738 goto out;
7739
7740 err = -EIO;
7741 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7742 goto out;
7743
7744 /* Bootstrap checksum at offset 0x10 */
7745 csum = calc_crc((unsigned char *) buf, 0x10);
7746 if(csum != cpu_to_le32(buf[0x10/4]))
7747 goto out;
7748
7749 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7750 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7751 if (csum != cpu_to_le32(buf[0xfc/4]))
7752 goto out;
7753
7754 err = 0;
7755
7756out:
7757 kfree(buf);
7758 return err;
7759}
7760
7761#define TG3_SERDES_TIMEOUT_SEC 2
7762#define TG3_COPPER_TIMEOUT_SEC 6
7763
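/* ethtool self-test: wait up to TG3_SERDES_TIMEOUT_SEC (SerDes) or
 * TG3_COPPER_TIMEOUT_SEC (copper) seconds for the carrier to come up.
 */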
7764static int tg3_test_link(struct tg3 *tp)
7765{
7766 int i, max;
7767
7768 if (!netif_running(tp->dev))
7769 return -ENODEV;
7770
7771 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7772 max = TG3_SERDES_TIMEOUT_SEC;
7773 else
7774 max = TG3_COPPER_TIMEOUT_SEC;
7775
7776 for (i = 0; i < max; i++) {
7777 if (netif_carrier_ok(tp->dev))
7778 return 0;
7779
7780 if (msleep_interruptible(1000))
7781 break;
7782 }
7783
7784 return -EIO;
7785}
7786
7787/* Only test the commonly used registers */
7788static int tg3_test_registers(struct tg3 *tp)
7789{
7790 int i, is_5705;
7791 u32 offset, read_mask, write_mask, val, save_val, read_val;
7792 static struct {
7793 u16 offset;
7794 u16 flags;
7795#define TG3_FL_5705 0x1
7796#define TG3_FL_NOT_5705 0x2
7797#define TG3_FL_NOT_5788 0x4
7798 u32 read_mask;
7799 u32 write_mask;
7800 } reg_tbl[] = {
7801 /* MAC Control Registers */
7802 { MAC_MODE, TG3_FL_NOT_5705,
7803 0x00000000, 0x00ef6f8c },
7804 { MAC_MODE, TG3_FL_5705,
7805 0x00000000, 0x01ef6b8c },
7806 { MAC_STATUS, TG3_FL_NOT_5705,
7807 0x03800107, 0x00000000 },
7808 { MAC_STATUS, TG3_FL_5705,
7809 0x03800100, 0x00000000 },
7810 { MAC_ADDR_0_HIGH, 0x0000,
7811 0x00000000, 0x0000ffff },
7812 { MAC_ADDR_0_LOW, 0x0000,
7813 0x00000000, 0xffffffff },
7814 { MAC_RX_MTU_SIZE, 0x0000,
7815 0x00000000, 0x0000ffff },
7816 { MAC_TX_MODE, 0x0000,
7817 0x00000000, 0x00000070 },
7818 { MAC_TX_LENGTHS, 0x0000,
7819 0x00000000, 0x00003fff },
7820 { MAC_RX_MODE, TG3_FL_NOT_5705,
7821 0x00000000, 0x000007fc },
7822 { MAC_RX_MODE, TG3_FL_5705,
7823 0x00000000, 0x000007dc },
7824 { MAC_HASH_REG_0, 0x0000,
7825 0x00000000, 0xffffffff },
7826 { MAC_HASH_REG_1, 0x0000,
7827 0x00000000, 0xffffffff },
7828 { MAC_HASH_REG_2, 0x0000,
7829 0x00000000, 0xffffffff },
7830 { MAC_HASH_REG_3, 0x0000,
7831 0x00000000, 0xffffffff },
7832
7833 /* Receive Data and Receive BD Initiator Control Registers. */
7834 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7835 0x00000000, 0xffffffff },
7836 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7837 0x00000000, 0xffffffff },
7838 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7839 0x00000000, 0x00000003 },
7840 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7841 0x00000000, 0xffffffff },
7842 { RCVDBDI_STD_BD+0, 0x0000,
7843 0x00000000, 0xffffffff },
7844 { RCVDBDI_STD_BD+4, 0x0000,
7845 0x00000000, 0xffffffff },
7846 { RCVDBDI_STD_BD+8, 0x0000,
7847 0x00000000, 0xffff0002 },
7848 { RCVDBDI_STD_BD+0xc, 0x0000,
7849 0x00000000, 0xffffffff },
7850
7851 /* Receive BD Initiator Control Registers. */
7852 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7853 0x00000000, 0xffffffff },
7854 { RCVBDI_STD_THRESH, TG3_FL_5705,
7855 0x00000000, 0x000003ff },
7856 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7857 0x00000000, 0xffffffff },
7858
7859 /* Host Coalescing Control Registers. */
7860 { HOSTCC_MODE, TG3_FL_NOT_5705,
7861 0x00000000, 0x00000004 },
7862 { HOSTCC_MODE, TG3_FL_5705,
7863 0x00000000, 0x000000f6 },
7864 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7865 0x00000000, 0xffffffff },
7866 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7867 0x00000000, 0x000003ff },
7868 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7869 0x00000000, 0xffffffff },
7870 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7871 0x00000000, 0x000003ff },
7872 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7873 0x00000000, 0xffffffff },
7874 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7875 0x00000000, 0x000000ff },
7876 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7877 0x00000000, 0xffffffff },
7878 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7879 0x00000000, 0x000000ff },
7880 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7881 0x00000000, 0xffffffff },
7882 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7883 0x00000000, 0xffffffff },
7884 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7885 0x00000000, 0xffffffff },
7886 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7887 0x00000000, 0x000000ff },
7888 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7889 0x00000000, 0xffffffff },
7890 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7891 0x00000000, 0x000000ff },
7892 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7893 0x00000000, 0xffffffff },
7894 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7895 0x00000000, 0xffffffff },
7896 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7897 0x00000000, 0xffffffff },
7898 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7899 0x00000000, 0xffffffff },
7900 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7901 0x00000000, 0xffffffff },
7902 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7903 0xffffffff, 0x00000000 },
7904 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7905 0xffffffff, 0x00000000 },
7906
7907 /* Buffer Manager Control Registers. */
7908 { BUFMGR_MB_POOL_ADDR, 0x0000,
7909 0x00000000, 0x007fff80 },
7910 { BUFMGR_MB_POOL_SIZE, 0x0000,
7911 0x00000000, 0x007fffff },
7912 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7913 0x00000000, 0x0000003f },
7914 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7915 0x00000000, 0x000001ff },
7916 { BUFMGR_MB_HIGH_WATER, 0x0000,
7917 0x00000000, 0x000001ff },
7918 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7919 0xffffffff, 0x00000000 },
7920 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7921 0xffffffff, 0x00000000 },
7922
7923 /* Mailbox Registers */
7924 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7925 0x00000000, 0x000001ff },
7926 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7927 0x00000000, 0x000001ff },
7928 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7929 0x00000000, 0x000007ff },
7930 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7931 0x00000000, 0x000001ff },
7932
7933 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7934 };
7935
7936 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7937 is_5705 = 1;
7938 else
7939 is_5705 = 0;
7940
7941 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7942 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7943 continue;
7944
7945 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7946 continue;
7947
7948 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7949 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7950 continue;
7951
7952 offset = (u32) reg_tbl[i].offset;
7953 read_mask = reg_tbl[i].read_mask;
7954 write_mask = reg_tbl[i].write_mask;
7955
7956 /* Save the original register content */
7957 save_val = tr32(offset);
7958
7959 /* Determine the read-only value. */
7960 read_val = save_val & read_mask;
7961
7962 /* Write zero to the register, then make sure the read-only bits
7963 * are not changed and the read/write bits are all zeros.
7964 */
7965 tw32(offset, 0);
7966
7967 val = tr32(offset);
7968
7969 /* Test the read-only and read/write bits. */
7970 if (((val & read_mask) != read_val) || (val & write_mask))
7971 goto out;
7972
7973 /* Write ones to all the bits defined by RdMask and WrMask, then
7974 * make sure the read-only bits are not changed and the
7975 * read/write bits are all ones.
7976 */
7977 tw32(offset, read_mask | write_mask);
7978
7979 val = tr32(offset);
7980
7981 /* Test the read-only bits. */
7982 if ((val & read_mask) != read_val)
7983 goto out;
7984
7985 /* Test the read/write bits. */
7986 if ((val & write_mask) != write_mask)
7987 goto out;
7988
7989 tw32(offset, save_val);
7990 }
7991
7992 return 0;
7993
7994out:
7995 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7996 tw32(offset, save_val);
7997 return -EIO;
7998}
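/* The per-register check above boils down to a simple predicate; a small
 * sketch of it is kept below, compiled out and purely illustrative (the
 * helper name is hypothetical and not used anywhere in the driver).
 */
#if 0
static int tg3_reg_pattern_ok(u32 val, u32 read_mask, u32 read_val,
			      u32 write_mask, u32 written)
{
	/* Read-only bits must still hold their saved value and the
	 * read/write bits must echo back exactly what was written.
	 */
	return ((val & read_mask) == read_val) &&
	       ((val & write_mask) == (written & write_mask));
}
#endif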
7999
Michael Chan7942e1d2005-05-29 14:58:36 -07008000static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8001{
8002 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8003 int i;
8004 u32 j;
8005
8006 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8007 for (j = 0; j < len; j += 4) {
8008 u32 val;
8009
8010 tg3_write_mem(tp, offset + j, test_pattern[i]);
8011 tg3_read_mem(tp, offset + j, &val);
8012 if (val != test_pattern[i])
8013 return -EIO;
8014 }
8015 }
8016 return 0;
8017}
8018
8019static int tg3_test_memory(struct tg3 *tp)
8020{
8021 static struct mem_entry {
8022 u32 offset;
8023 u32 len;
8024 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08008025 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07008026 { 0x00002000, 0x1c000},
8027 { 0xffffffff, 0x00000}
8028 }, mem_tbl_5705[] = {
8029 { 0x00000100, 0x0000c},
8030 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07008031 { 0x00004000, 0x00800},
8032 { 0x00006000, 0x01000},
8033 { 0x00008000, 0x02000},
8034 { 0x00010000, 0x0e000},
8035 { 0xffffffff, 0x00000}
8036 };
8037 struct mem_entry *mem_tbl;
8038 int err = 0;
8039 int i;
8040
8041 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8042 mem_tbl = mem_tbl_5705;
8043 else
8044 mem_tbl = mem_tbl_570x;
8045
8046 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8047 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8048 mem_tbl[i].len)) != 0)
8049 break;
8050 }
8051
8052 return err;
8053}
8054
Michael Chan9f40dea2005-09-05 17:53:06 -07008055#define TG3_MAC_LOOPBACK 0
8056#define TG3_PHY_LOOPBACK 1
8057
8058static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07008059{
Michael Chan9f40dea2005-09-05 17:53:06 -07008060 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07008061 u32 desc_idx;
8062 struct sk_buff *skb, *rx_skb;
8063 u8 *tx_data;
8064 dma_addr_t map;
8065 int num_pkts, tx_len, rx_len, i, err;
8066 struct tg3_rx_buffer_desc *desc;
8067
Michael Chan9f40dea2005-09-05 17:53:06 -07008068 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008069 /* HW errata - mac loopback fails in some cases on 5780.
8070 * Normal traffic and PHY loopback are not affected by
8071	 * this erratum.
8072 */
8073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8074 return 0;
8075
Michael Chan9f40dea2005-09-05 17:53:06 -07008076 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8077 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8078 MAC_MODE_PORT_MODE_GMII;
8079 tw32(MAC_MODE, mac_mode);
8080 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008081 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8082 BMCR_SPEED1000);
8083 udelay(40);
8084 /* reset to prevent losing 1st rx packet intermittently */
8085 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8086 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8087 udelay(10);
8088 tw32_f(MAC_RX_MODE, tp->rx_mode);
8089 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008090 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8091 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8092 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8093 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8094 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -07008095 }
8096 else
8097 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07008098
8099 err = -EIO;
8100
Michael Chanc76949a2005-05-29 14:58:59 -07008101 tx_len = 1514;
8102	skb = dev_alloc_skb(tx_len);
	if (!skb)			/* allocation failure: abort the loopback test */
		return -ENOMEM;
8103	tx_data = skb_put(skb, tx_len);
8104 memcpy(tx_data, tp->dev->dev_addr, 6);
8105 memset(tx_data + 6, 0x0, 8);
8106
8107 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8108
8109 for (i = 14; i < tx_len; i++)
8110 tx_data[i] = (u8) (i & 0xff);
8111
8112 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8113
8114 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8115 HOSTCC_MODE_NOW);
8116
8117 udelay(10);
8118
8119 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8120
Michael Chanc76949a2005-05-29 14:58:59 -07008121 num_pkts = 0;
8122
Michael Chan9f40dea2005-09-05 17:53:06 -07008123 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07008124
Michael Chan9f40dea2005-09-05 17:53:06 -07008125 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07008126 num_pkts++;
8127
Michael Chan9f40dea2005-09-05 17:53:06 -07008128 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8129 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07008130 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07008131
8132 udelay(10);
8133
8134 for (i = 0; i < 10; i++) {
8135 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8136 HOSTCC_MODE_NOW);
8137
8138 udelay(10);
8139
8140 tx_idx = tp->hw_status->idx[0].tx_consumer;
8141 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07008142 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07008143 (rx_idx == (rx_start_idx + num_pkts)))
8144 break;
8145 }
8146
8147 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8148 dev_kfree_skb(skb);
8149
Michael Chan9f40dea2005-09-05 17:53:06 -07008150 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07008151 goto out;
8152
8153 if (rx_idx != rx_start_idx + num_pkts)
8154 goto out;
8155
8156 desc = &tp->rx_rcb[rx_start_idx];
8157 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8158 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8159 if (opaque_key != RXD_OPAQUE_RING_STD)
8160 goto out;
8161
8162 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8163 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8164 goto out;
8165
8166 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8167 if (rx_len != tx_len)
8168 goto out;
8169
8170 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8171
8172 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8173 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8174
8175 for (i = 14; i < tx_len; i++) {
8176 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8177 goto out;
8178 }
8179 err = 0;
8180
8181 /* tg3_free_rings will unmap and free the rx_skb */
8182out:
8183 return err;
8184}
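/* The loopback frame built above is 1514 bytes: the first 6 bytes are the
 * device's own MAC address, the next 8 bytes are zero, and every byte from
 * offset 14 onward carries (offset & 0xff); the receive path only verifies
 * that counting pattern.
 */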
8185
Michael Chan9f40dea2005-09-05 17:53:06 -07008186#define TG3_MAC_LOOPBACK_FAILED 1
8187#define TG3_PHY_LOOPBACK_FAILED 2
8188#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8189 TG3_PHY_LOOPBACK_FAILED)
8190
8191static int tg3_test_loopback(struct tg3 *tp)
8192{
8193 int err = 0;
8194
8195 if (!netif_running(tp->dev))
8196 return TG3_LOOPBACK_FAILED;
8197
8198 tg3_reset_hw(tp);
8199
8200 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8201 err |= TG3_MAC_LOOPBACK_FAILED;
8202 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8203 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8204 err |= TG3_PHY_LOOPBACK_FAILED;
8205 }
8206
8207 return err;
8208}
8209
Michael Chan4cafd3f2005-05-29 14:56:34 -07008210static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8211 u64 *data)
8212{
Michael Chan566f86a2005-05-29 14:56:58 -07008213 struct tg3 *tp = netdev_priv(dev);
8214
8215 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8216
8217 if (tg3_test_nvram(tp) != 0) {
8218 etest->flags |= ETH_TEST_FL_FAILED;
8219 data[0] = 1;
8220 }
Michael Chanca430072005-05-29 14:57:23 -07008221 if (tg3_test_link(tp) != 0) {
8222 etest->flags |= ETH_TEST_FL_FAILED;
8223 data[1] = 1;
8224 }
Michael Chana71116d2005-05-29 14:58:11 -07008225 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanec41c7d2006-01-17 02:40:55 -08008226 int err, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07008227
Michael Chanbbe832c2005-06-24 20:20:04 -07008228 if (netif_running(dev)) {
8229 tg3_netif_stop(tp);
8230 irq_sync = 1;
8231 }
8232
8233 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07008234
8235 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -08008236 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008237 tg3_halt_cpu(tp, RX_CPU_BASE);
8238 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8239 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08008240 if (!err)
8241 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008242
8243 if (tg3_test_registers(tp) != 0) {
8244 etest->flags |= ETH_TEST_FL_FAILED;
8245 data[2] = 1;
8246 }
Michael Chan7942e1d2005-05-29 14:58:36 -07008247 if (tg3_test_memory(tp) != 0) {
8248 etest->flags |= ETH_TEST_FL_FAILED;
8249 data[3] = 1;
8250 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008251 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07008252 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07008253
David S. Millerf47c11e2005-06-24 20:18:35 -07008254 tg3_full_unlock(tp);
8255
Michael Chand4bc3922005-05-29 14:59:20 -07008256 if (tg3_test_interrupt(tp) != 0) {
8257 etest->flags |= ETH_TEST_FL_FAILED;
8258 data[5] = 1;
8259 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008260
8261 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07008262
Michael Chana71116d2005-05-29 14:58:11 -07008263 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8264 if (netif_running(dev)) {
8265 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8266 tg3_init_hw(tp);
8267 tg3_netif_start(tp);
8268 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008269
8270 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008271 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07008272}
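/* Result layout used above, as reported to user space by "ethtool -t":
 *   data[0] nvram test      data[1] link test
 *   data[2] register test   data[3] memory test
 *   data[4] loopback test   data[5] interrupt test
 * A non-zero entry (or the loopback failure mask in data[4]) marks a failure.
 */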
8273
Linus Torvalds1da177e2005-04-16 15:20:36 -07008274static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8275{
8276 struct mii_ioctl_data *data = if_mii(ifr);
8277 struct tg3 *tp = netdev_priv(dev);
8278 int err;
8279
8280 switch(cmd) {
8281 case SIOCGMIIPHY:
8282 data->phy_id = PHY_ADDR;
8283
8284 /* fallthru */
8285 case SIOCGMIIREG: {
8286 u32 mii_regval;
8287
8288 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8289 break; /* We have no PHY */
8290
David S. Millerf47c11e2005-06-24 20:18:35 -07008291 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008292 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07008293 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008294
8295 data->val_out = mii_regval;
8296
8297 return err;
8298 }
8299
8300 case SIOCSMIIREG:
8301 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8302 break; /* We have no PHY */
8303
8304 if (!capable(CAP_NET_ADMIN))
8305 return -EPERM;
8306
David S. Millerf47c11e2005-06-24 20:18:35 -07008307 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07008309 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008310
8311 return err;
8312
8313 default:
8314 /* do nothing */
8315 break;
8316 }
8317 return -EOPNOTSUPP;
8318}
8319
8320#if TG3_VLAN_TAG_USED
8321static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8322{
8323 struct tg3 *tp = netdev_priv(dev);
8324
David S. Millerf47c11e2005-06-24 20:18:35 -07008325 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008326
8327 tp->vlgrp = grp;
8328
8329 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8330 __tg3_set_rx_mode(dev);
8331
David S. Millerf47c11e2005-06-24 20:18:35 -07008332 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008333}
8334
8335static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8336{
8337 struct tg3 *tp = netdev_priv(dev);
8338
David S. Millerf47c11e2005-06-24 20:18:35 -07008339 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008340 if (tp->vlgrp)
8341 tp->vlgrp->vlan_devices[vid] = NULL;
David S. Millerf47c11e2005-06-24 20:18:35 -07008342 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008343}
8344#endif
8345
David S. Miller15f98502005-05-18 22:49:26 -07008346static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8347{
8348 struct tg3 *tp = netdev_priv(dev);
8349
8350 memcpy(ec, &tp->coal, sizeof(*ec));
8351 return 0;
8352}
8353
Michael Chand244c892005-07-05 14:42:33 -07008354static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8355{
8356 struct tg3 *tp = netdev_priv(dev);
8357 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8358 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8359
8360 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8361 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8362 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8363 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8364 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8365 }
8366
8367 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8368 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8369 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8370 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8371 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8372 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8373 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8374 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8375 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8376 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8377 return -EINVAL;
8378
8379 /* No rx interrupts will be generated if both are zero */
8380 if ((ec->rx_coalesce_usecs == 0) &&
8381 (ec->rx_max_coalesced_frames == 0))
8382 return -EINVAL;
8383
8384 /* No tx interrupts will be generated if both are zero */
8385 if ((ec->tx_coalesce_usecs == 0) &&
8386 (ec->tx_max_coalesced_frames == 0))
8387 return -EINVAL;
8388
8389 /* Only copy relevant parameters, ignore all others. */
8390 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8391 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8392 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8393 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8394 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8395 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8396 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8397 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8398 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8399
8400 if (netif_running(dev)) {
8401 tg3_full_lock(tp, 0);
8402 __tg3_set_coalesce(tp, &tp->coal);
8403 tg3_full_unlock(tp);
8404 }
8405 return 0;
8406}
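/* These limits are exercised from user space through the ethtool coalescing
 * interface, e.g. something like "ethtool -C eth0 rx-usecs 20 rx-frames 5"
 * lands here; out-of-range values are rejected with -EINVAL as above.
 */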
8407
Linus Torvalds1da177e2005-04-16 15:20:36 -07008408static struct ethtool_ops tg3_ethtool_ops = {
8409 .get_settings = tg3_get_settings,
8410 .set_settings = tg3_set_settings,
8411 .get_drvinfo = tg3_get_drvinfo,
8412 .get_regs_len = tg3_get_regs_len,
8413 .get_regs = tg3_get_regs,
8414 .get_wol = tg3_get_wol,
8415 .set_wol = tg3_set_wol,
8416 .get_msglevel = tg3_get_msglevel,
8417 .set_msglevel = tg3_set_msglevel,
8418 .nway_reset = tg3_nway_reset,
8419 .get_link = ethtool_op_get_link,
8420 .get_eeprom_len = tg3_get_eeprom_len,
8421 .get_eeprom = tg3_get_eeprom,
8422 .set_eeprom = tg3_set_eeprom,
8423 .get_ringparam = tg3_get_ringparam,
8424 .set_ringparam = tg3_set_ringparam,
8425 .get_pauseparam = tg3_get_pauseparam,
8426 .set_pauseparam = tg3_set_pauseparam,
8427 .get_rx_csum = tg3_get_rx_csum,
8428 .set_rx_csum = tg3_set_rx_csum,
8429 .get_tx_csum = ethtool_op_get_tx_csum,
8430 .set_tx_csum = tg3_set_tx_csum,
8431 .get_sg = ethtool_op_get_sg,
8432 .set_sg = ethtool_op_set_sg,
8433#if TG3_TSO_SUPPORT != 0
8434 .get_tso = ethtool_op_get_tso,
8435 .set_tso = tg3_set_tso,
8436#endif
Michael Chan4cafd3f2005-05-29 14:56:34 -07008437 .self_test_count = tg3_get_test_count,
8438 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008439 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07008440 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008441 .get_stats_count = tg3_get_stats_count,
8442 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07008443 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07008444 .set_coalesce = tg3_set_coalesce,
John W. Linville2ff43692005-09-12 14:44:20 -07008445 .get_perm_addr = ethtool_op_get_perm_addr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008446};
8447
8448static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8449{
8450 u32 cursize, val;
8451
8452 tp->nvram_size = EEPROM_CHIP_SIZE;
8453
8454 if (tg3_nvram_read(tp, 0, &val) != 0)
8455 return;
8456
8457 if (swab32(val) != TG3_EEPROM_MAGIC)
8458 return;
8459
8460 /*
8461 * Size the chip by reading offsets at increasing powers of two.
8462 * When we encounter our validation signature, we know the addressing
8463 * has wrapped around, and thus have our chip size.
8464 */
8465 cursize = 0x800;
8466
8467 while (cursize < tp->nvram_size) {
8468 if (tg3_nvram_read(tp, cursize, &val) != 0)
8469 return;
8470
8471 if (swab32(val) == TG3_EEPROM_MAGIC)
8472 break;
8473
8474 cursize <<= 1;
8475 }
8476
8477 tp->nvram_size = cursize;
8478}
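/* Worked example of the probe above: the read address wraps at the chip
 * size, so seeing the signature again at offset "cursize" means offset 0
 * was aliased there.  Probing 0x800, 0x1000, 0x2000, 0x4000 against a
 * 16 KiB part finds the magic again at 0x4000, so nvram_size becomes 0x4000.
 */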
8479
8480static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8481{
8482 u32 val;
8483
8484 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8485 if (val != 0) {
8486 tp->nvram_size = (val >> 16) * 1024;
8487 return;
8488 }
8489 }
8490 tp->nvram_size = 0x20000;
8491}
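/* Example of the encoding read above: a value of 0x00800000 in the word at
 * NVRAM offset 0xf0 advertises (0x0080 * 1024) = 0x20000 bytes (128 KiB);
 * a zero word falls back to the 128 KiB default.
 */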
8492
8493static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8494{
8495 u32 nvcfg1;
8496
8497 nvcfg1 = tr32(NVRAM_CFG1);
8498 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8499 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8500 }
8501 else {
8502 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8503 tw32(NVRAM_CFG1, nvcfg1);
8504 }
8505
Michael Chan4c987482005-09-05 17:52:38 -07008506 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -07008507 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008508 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8509 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8510 tp->nvram_jedecnum = JEDEC_ATMEL;
8511 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8512 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8513 break;
8514 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8515 tp->nvram_jedecnum = JEDEC_ATMEL;
8516 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8517 break;
8518 case FLASH_VENDOR_ATMEL_EEPROM:
8519 tp->nvram_jedecnum = JEDEC_ATMEL;
8520 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8521 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8522 break;
8523 case FLASH_VENDOR_ST:
8524 tp->nvram_jedecnum = JEDEC_ST;
8525 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8526 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8527 break;
8528 case FLASH_VENDOR_SAIFUN:
8529 tp->nvram_jedecnum = JEDEC_SAIFUN;
8530 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8531 break;
8532 case FLASH_VENDOR_SST_SMALL:
8533 case FLASH_VENDOR_SST_LARGE:
8534 tp->nvram_jedecnum = JEDEC_SST;
8535 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8536 break;
8537 }
8538 }
8539 else {
8540 tp->nvram_jedecnum = JEDEC_ATMEL;
8541 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8542 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8543 }
8544}
8545
Michael Chan361b4ac2005-04-21 17:11:21 -07008546static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8547{
8548 u32 nvcfg1;
8549
8550 nvcfg1 = tr32(NVRAM_CFG1);
8551
Michael Chane6af3012005-04-21 17:12:05 -07008552 /* NVRAM protection for TPM */
8553 if (nvcfg1 & (1 << 27))
8554 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8555
Michael Chan361b4ac2005-04-21 17:11:21 -07008556 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8557 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8558 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8559 tp->nvram_jedecnum = JEDEC_ATMEL;
8560 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8561 break;
8562 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8563 tp->nvram_jedecnum = JEDEC_ATMEL;
8564 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8565 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8566 break;
8567 case FLASH_5752VENDOR_ST_M45PE10:
8568 case FLASH_5752VENDOR_ST_M45PE20:
8569 case FLASH_5752VENDOR_ST_M45PE40:
8570 tp->nvram_jedecnum = JEDEC_ST;
8571 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8572 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8573 break;
8574 }
8575
8576 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8577 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8578 case FLASH_5752PAGE_SIZE_256:
8579 tp->nvram_pagesize = 256;
8580 break;
8581 case FLASH_5752PAGE_SIZE_512:
8582 tp->nvram_pagesize = 512;
8583 break;
8584 case FLASH_5752PAGE_SIZE_1K:
8585 tp->nvram_pagesize = 1024;
8586 break;
8587 case FLASH_5752PAGE_SIZE_2K:
8588 tp->nvram_pagesize = 2048;
8589 break;
8590 case FLASH_5752PAGE_SIZE_4K:
8591 tp->nvram_pagesize = 4096;
8592 break;
8593 case FLASH_5752PAGE_SIZE_264:
8594 tp->nvram_pagesize = 264;
8595 break;
8596 }
8597 }
8598 else {
8599 /* For eeprom, set pagesize to maximum eeprom size */
8600 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8601
8602 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8603 tw32(NVRAM_CFG1, nvcfg1);
8604 }
8605}
8606
Linus Torvalds1da177e2005-04-16 15:20:36 -07008607/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8608static void __devinit tg3_nvram_init(struct tg3 *tp)
8609{
8610 int j;
8611
8612 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8613 return;
8614
8615 tw32_f(GRC_EEPROM_ADDR,
8616 (EEPROM_ADDR_FSM_RESET |
8617 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8618 EEPROM_ADDR_CLKPERD_SHIFT)));
8619
8620 /* XXX schedule_timeout() ... */
8621 for (j = 0; j < 100; j++)
8622 udelay(10);
8623
8624 /* Enable seeprom accesses. */
8625 tw32_f(GRC_LOCAL_CTRL,
8626 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8627 udelay(100);
8628
8629 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8630 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8631 tp->tg3_flags |= TG3_FLAG_NVRAM;
8632
Michael Chanec41c7d2006-01-17 02:40:55 -08008633 if (tg3_nvram_lock(tp)) {
8634			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8635 "tg3_nvram_init failed.\n", tp->dev->name);
8636 return;
8637 }
Michael Chane6af3012005-04-21 17:12:05 -07008638 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008639
Michael Chan361b4ac2005-04-21 17:11:21 -07008640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8641 tg3_get_5752_nvram_info(tp);
8642 else
8643 tg3_get_nvram_info(tp);
8644
Linus Torvalds1da177e2005-04-16 15:20:36 -07008645 tg3_get_nvram_size(tp);
8646
Michael Chane6af3012005-04-21 17:12:05 -07008647 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -08008648 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008649
8650 } else {
8651 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8652
8653 tg3_get_eeprom_size(tp);
8654 }
8655}
8656
8657static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8658 u32 offset, u32 *val)
8659{
8660 u32 tmp;
8661 int i;
8662
8663 if (offset > EEPROM_ADDR_ADDR_MASK ||
8664 (offset % 4) != 0)
8665 return -EINVAL;
8666
8667 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8668 EEPROM_ADDR_DEVID_MASK |
8669 EEPROM_ADDR_READ);
8670 tw32(GRC_EEPROM_ADDR,
8671 tmp |
8672 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8673 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8674 EEPROM_ADDR_ADDR_MASK) |
8675 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8676
8677 for (i = 0; i < 10000; i++) {
8678 tmp = tr32(GRC_EEPROM_ADDR);
8679
8680 if (tmp & EEPROM_ADDR_COMPLETE)
8681 break;
8682 udelay(100);
8683 }
8684 if (!(tmp & EEPROM_ADDR_COMPLETE))
8685 return -EBUSY;
8686
8687 *val = tr32(GRC_EEPROM_DATA);
8688 return 0;
8689}
8690
8691#define NVRAM_CMD_TIMEOUT 10000
8692
8693static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8694{
8695 int i;
8696
8697 tw32(NVRAM_CMD, nvram_cmd);
8698 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8699 udelay(10);
8700 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8701 udelay(10);
8702 break;
8703 }
8704 }
8705 if (i == NVRAM_CMD_TIMEOUT) {
8706 return -EBUSY;
8707 }
8708 return 0;
8709}
8710
8711static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8712{
8713 int ret;
8714
8715 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8716 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8717 return -EINVAL;
8718 }
8719
8720 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8721 return tg3_nvram_read_using_eeprom(tp, offset, val);
8722
8723 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8724 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8725 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8726
8727 offset = ((offset / tp->nvram_pagesize) <<
8728 ATMEL_AT45DB0X1B_PAGE_POS) +
8729 (offset % tp->nvram_pagesize);
8730 }
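	/* Example of the translation above, assuming the AT45DB0X1B uses
	 * 264-byte pages with the page number placed at bit position
	 * ATMEL_AT45DB0X1B_PAGE_POS: if that position is 9, a linear offset
	 * of 1000 becomes page 1000/264 = 3, byte 1000%264 = 208, i.e. a
	 * device address of (3 << 9) + 208 = 0x6d0.
	 */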
8731
8732 if (offset > NVRAM_ADDR_MSK)
8733 return -EINVAL;
8734
Michael Chanec41c7d2006-01-17 02:40:55 -08008735 ret = tg3_nvram_lock(tp);
8736 if (ret)
8737 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738
Michael Chane6af3012005-04-21 17:12:05 -07008739 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008740
8741 tw32(NVRAM_ADDR, offset);
8742 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8743 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8744
8745 if (ret == 0)
8746 *val = swab32(tr32(NVRAM_RDDATA));
8747
Michael Chane6af3012005-04-21 17:12:05 -07008748 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008749
Michael Chan381291b2005-12-13 21:08:21 -08008750 tg3_nvram_unlock(tp);
8751
Linus Torvalds1da177e2005-04-16 15:20:36 -07008752 return ret;
8753}
8754
8755static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8756 u32 offset, u32 len, u8 *buf)
8757{
8758 int i, j, rc = 0;
8759 u32 val;
8760
8761 for (i = 0; i < len; i += 4) {
8762 u32 addr, data;
8763
8764 addr = offset + i;
8765
8766 memcpy(&data, buf + i, 4);
8767
8768 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8769
8770 val = tr32(GRC_EEPROM_ADDR);
8771 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8772
8773 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8774 EEPROM_ADDR_READ);
8775 tw32(GRC_EEPROM_ADDR, val |
8776 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8777 (addr & EEPROM_ADDR_ADDR_MASK) |
8778 EEPROM_ADDR_START |
8779 EEPROM_ADDR_WRITE);
8780
8781 for (j = 0; j < 10000; j++) {
8782 val = tr32(GRC_EEPROM_ADDR);
8783
8784 if (val & EEPROM_ADDR_COMPLETE)
8785 break;
8786 udelay(100);
8787 }
8788 if (!(val & EEPROM_ADDR_COMPLETE)) {
8789 rc = -EBUSY;
8790 break;
8791 }
8792 }
8793
8794 return rc;
8795}
8796
8797/* offset and length are dword aligned */
8798static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8799 u8 *buf)
8800{
8801 int ret = 0;
8802 u32 pagesize = tp->nvram_pagesize;
8803 u32 pagemask = pagesize - 1;
8804 u32 nvram_cmd;
8805 u8 *tmp;
8806
8807 tmp = kmalloc(pagesize, GFP_KERNEL);
8808 if (tmp == NULL)
8809 return -ENOMEM;
8810
8811 while (len) {
8812 int j;
Michael Chane6af3012005-04-21 17:12:05 -07008813 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008814
8815 phy_addr = offset & ~pagemask;
8816
8817 for (j = 0; j < pagesize; j += 4) {
8818 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8819 (u32 *) (tmp + j))))
8820 break;
8821 }
8822 if (ret)
8823 break;
8824
8825 page_off = offset & pagemask;
8826 size = pagesize;
8827 if (len < size)
8828 size = len;
8829
8830 len -= size;
8831
8832 memcpy(tmp + page_off, buf, size);
8833
8834 offset = offset + (pagesize - page_off);
8835
Michael Chane6af3012005-04-21 17:12:05 -07008836 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837
8838 /*
8839 * Before we can erase the flash page, we need
8840 * to issue a special "write enable" command.
8841 */
8842 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8843
8844 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8845 break;
8846
8847 /* Erase the target page */
8848 tw32(NVRAM_ADDR, phy_addr);
8849
8850 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8851 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8852
8853 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8854 break;
8855
8856 /* Issue another write enable to start the write. */
8857 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8858
8859 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8860 break;
8861
8862 for (j = 0; j < pagesize; j += 4) {
8863 u32 data;
8864
8865 data = *((u32 *) (tmp + j));
8866 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8867
8868 tw32(NVRAM_ADDR, phy_addr + j);
8869
8870 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8871 NVRAM_CMD_WR;
8872
8873 if (j == 0)
8874 nvram_cmd |= NVRAM_CMD_FIRST;
8875 else if (j == (pagesize - 4))
8876 nvram_cmd |= NVRAM_CMD_LAST;
8877
8878 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8879 break;
8880 }
8881 if (ret)
8882 break;
8883 }
8884
8885 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8886 tg3_nvram_exec_cmd(tp, nvram_cmd);
8887
8888 kfree(tmp);
8889
8890 return ret;
8891}
8892
8893/* offset and length are dword aligned */
8894static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8895 u8 *buf)
8896{
8897 int i, ret = 0;
8898
8899 for (i = 0; i < len; i += 4, offset += 4) {
8900 u32 data, page_off, phy_addr, nvram_cmd;
8901
8902 memcpy(&data, buf + i, 4);
8903 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8904
8905 page_off = offset % tp->nvram_pagesize;
8906
8907 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8908 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8909
8910 phy_addr = ((offset / tp->nvram_pagesize) <<
8911 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8912 }
8913 else {
8914 phy_addr = offset;
8915 }
8916
8917 tw32(NVRAM_ADDR, phy_addr);
8918
8919 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8920
8921 if ((page_off == 0) || (i == 0))
8922 nvram_cmd |= NVRAM_CMD_FIRST;
8923 else if (page_off == (tp->nvram_pagesize - 4))
8924 nvram_cmd |= NVRAM_CMD_LAST;
8925
8926 if (i == (len - 4))
8927 nvram_cmd |= NVRAM_CMD_LAST;
8928
Michael Chan4c987482005-09-05 17:52:38 -07008929 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8930 (tp->nvram_jedecnum == JEDEC_ST) &&
8931 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932
8933 if ((ret = tg3_nvram_exec_cmd(tp,
8934 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8935 NVRAM_CMD_DONE)))
8936
8937 break;
8938 }
8939 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8940 /* We always do complete word writes to eeprom. */
8941 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8942 }
8943
8944 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8945 break;
8946 }
8947 return ret;
8948}
8949
8950/* offset and length are dword aligned */
8951static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8952{
8953 int ret;
8954
8955 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8956 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8957 return -EINVAL;
8958 }
8959
8960 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07008961 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8962 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008963 udelay(40);
8964 }
8965
8966 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8967 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8968 }
8969 else {
8970 u32 grc_mode;
8971
Michael Chanec41c7d2006-01-17 02:40:55 -08008972 ret = tg3_nvram_lock(tp);
8973 if (ret)
8974 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008975
Michael Chane6af3012005-04-21 17:12:05 -07008976 tg3_enable_nvram_access(tp);
8977 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8978 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008979 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008980
8981 grc_mode = tr32(GRC_MODE);
8982 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8983
8984 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8985 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8986
8987 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8988 buf);
8989 }
8990 else {
8991 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8992 buf);
8993 }
8994
8995 grc_mode = tr32(GRC_MODE);
8996 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8997
Michael Chane6af3012005-04-21 17:12:05 -07008998 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008999 tg3_nvram_unlock(tp);
9000 }
9001
9002 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07009003 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009004 udelay(40);
9005 }
9006
9007 return ret;
9008}
9009
9010struct subsys_tbl_ent {
9011 u16 subsys_vendor, subsys_devid;
9012 u32 phy_id;
9013};
9014
9015static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9016 /* Broadcom boards. */
9017 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9018 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9019 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9020 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9021 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9022 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9023 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9024 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9025 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9026 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9027 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9028
9029 /* 3com boards. */
9030 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9031 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9032 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9033 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9034 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9035
9036 /* DELL boards. */
9037 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9038 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9039 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9040 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9041
9042 /* Compaq boards. */
9043 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9044 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9045 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9046 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9047 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9048
9049 /* IBM boards. */
9050 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9051};
9052
9053static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9054{
9055 int i;
9056
9057 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9058 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9059 tp->pdev->subsystem_vendor) &&
9060 (subsys_id_to_phy_id[i].subsys_devid ==
9061 tp->pdev->subsystem_device))
9062 return &subsys_id_to_phy_id[i];
9063 }
9064 return NULL;
9065}
9066
Michael Chan7d0c41e2005-04-21 17:06:20 -07009067/* Since this function may be called in D3-hot power state during
9068 * tg3_init_one(), only config cycles are allowed.
9069 */
9070static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009071{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009072 u32 val;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009073
9074 /* Make sure register accesses (indirect or otherwise)
9075 * will function correctly.
9076 */
9077 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9078 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009079
9080 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009081 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9082
Linus Torvalds1da177e2005-04-16 15:20:36 -07009083 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9084 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9085 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009086 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9087 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009088
9089 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9090 tp->nic_sram_data_cfg = nic_cfg;
9091
9092 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9093 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9094 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9095 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9096 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9097 (ver > 0) && (ver < 0x100))
9098 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9099
Linus Torvalds1da177e2005-04-16 15:20:36 -07009100 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9101 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9102 eeprom_phy_serdes = 1;
9103
9104 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9105 if (nic_phy_id != 0) {
9106 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9107 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9108
9109 eeprom_phy_id = (id1 >> 16) << 10;
9110 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9111 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9112 } else
9113 eeprom_phy_id = 0;
9114
Michael Chan7d0c41e2005-04-21 17:06:20 -07009115 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -07009116 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -07009117 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -07009118 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9119 else
9120 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9121 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009122
John W. Linvillecbf46852005-04-21 17:01:29 -07009123 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009124 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9125 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -07009126 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07009127 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9128
9129 switch (led_cfg) {
9130 default:
9131 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9132 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9133 break;
9134
9135 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9136 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9137 break;
9138
9139 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9140 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -07009141
9142 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9143 * read on some older 5700/5701 bootcode.
9144 */
9145 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9146 ASIC_REV_5700 ||
9147 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9148 ASIC_REV_5701)
9149 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9150
Linus Torvalds1da177e2005-04-16 15:20:36 -07009151 break;
9152
9153 case SHASTA_EXT_LED_SHARED:
9154 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9155 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9156 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9157 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9158 LED_CTRL_MODE_PHY_2);
9159 break;
9160
9161 case SHASTA_EXT_LED_MAC:
9162 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9163 break;
9164
9165 case SHASTA_EXT_LED_COMBO:
9166 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9167 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9168 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9169 LED_CTRL_MODE_PHY_2);
9170 break;
9171
9172 };
9173
9174 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9176 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9177 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9178
9179 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9180 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9181 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9182 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9183
9184 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9185 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07009186 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009187 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9188 }
9189 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9190 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9191
9192 if (cfg2 & (1 << 17))
9193 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9194
9195 /* serdes signal pre-emphasis in register 0x590 set by */
9196 /* bootcode if bit 18 is set */
9197 if (cfg2 & (1 << 18))
9198 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9199 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009200}
9201
9202static int __devinit tg3_phy_probe(struct tg3 *tp)
9203{
9204 u32 hw_phy_id_1, hw_phy_id_2;
9205 u32 hw_phy_id, hw_phy_id_masked;
9206 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009207
9208 /* Reading the PHY ID register can conflict with ASF
9209 * firwmare access to the PHY hardware.
9210	 * firmware access to the PHY hardware.
9211 err = 0;
9212 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9213 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9214 } else {
9215 /* Now read the physical PHY_ID from the chip and verify
9216 * that it is sane. If it doesn't look good, we fall back
9217	 * to the hard-coded table based PHY_ID or, failing
9218	 * that, the value found in the eeprom area.
9219 */
9220 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9221 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9222
9223 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9224 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9225 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9226
9227 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9228 }
9229
9230 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9231 tp->phy_id = hw_phy_id;
9232 if (hw_phy_id_masked == PHY_ID_BCM8002)
9233 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -07009234 else
9235 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009236 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -07009237 if (tp->phy_id != PHY_ID_INVALID) {
9238 /* Do nothing, phy ID already set up in
9239 * tg3_get_eeprom_hw_cfg().
9240 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009241 } else {
9242 struct subsys_tbl_ent *p;
9243
9244 /* No eeprom signature? Try the hardcoded
9245 * subsys device table.
9246 */
9247 p = lookup_by_subsys(tp);
9248 if (!p)
9249 return -ENODEV;
9250
9251 tp->phy_id = p->phy_id;
9252 if (!tp->phy_id ||
9253 tp->phy_id == PHY_ID_BCM8002)
9254 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9255 }
9256 }
9257
Michael Chan747e8f82005-07-25 12:33:22 -07009258 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009259 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9260 u32 bmsr, adv_reg, tg3_ctrl;
9261
9262 tg3_readphy(tp, MII_BMSR, &bmsr);
9263 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9264 (bmsr & BMSR_LSTATUS))
9265 goto skip_phy_reset;
9266
9267 err = tg3_phy_reset(tp);
9268 if (err)
9269 return err;
9270
9271 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9272 ADVERTISE_100HALF | ADVERTISE_100FULL |
9273 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9274 tg3_ctrl = 0;
9275 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9276 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9277 MII_TG3_CTRL_ADV_1000_FULL);
9278 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9279 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9280 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9281 MII_TG3_CTRL_ENABLE_AS_MASTER);
9282 }
9283
9284 if (!tg3_copper_is_advertising_all(tp)) {
9285 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9286
9287 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9288 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9289
9290 tg3_writephy(tp, MII_BMCR,
9291 BMCR_ANENABLE | BMCR_ANRESTART);
9292 }
9293 tg3_phy_set_wirespeed(tp);
9294
9295 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9296 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9297 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9298 }
9299
9300skip_phy_reset:
9301 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9302 err = tg3_init_5401phy_dsp(tp);
9303 if (err)
9304 return err;
9305 }
9306
9307 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9308 err = tg3_init_5401phy_dsp(tp);
9309 }
9310
Michael Chan747e8f82005-07-25 12:33:22 -07009311 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009312 tp->link_config.advertising =
9313 (ADVERTISED_1000baseT_Half |
9314 ADVERTISED_1000baseT_Full |
9315 ADVERTISED_Autoneg |
9316 ADVERTISED_FIBRE);
9317 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9318 tp->link_config.advertising &=
9319 ~(ADVERTISED_1000baseT_Half |
9320 ADVERTISED_1000baseT_Full);
9321
9322 return err;
9323}
9324
9325static void __devinit tg3_read_partno(struct tg3 *tp)
9326{
9327 unsigned char vpd_data[256];
9328 int i;
9329
9330 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9331 /* Sun decided not to put the necessary bits in the
9332 * NVRAM of their onboard tg3 parts :(
9333 */
9334 strcpy(tp->board_part_number, "Sun 570X");
9335 return;
9336 }
9337
9338 for (i = 0; i < 256; i += 4) {
9339 u32 tmp;
9340
9341 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9342 goto out_not_found;
9343
9344 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9345 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9346 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9347 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9348 }
9349
9350 /* Now parse and find the part number. */
9351 for (i = 0; i < 256; ) {
9352 unsigned char val = vpd_data[i];
9353 int block_end;
9354
9355 if (val == 0x82 || val == 0x91) {
9356 i = (i + 3 +
9357 (vpd_data[i + 1] +
9358 (vpd_data[i + 2] << 8)));
9359 continue;
9360 }
9361
9362 if (val != 0x90)
9363 goto out_not_found;
9364
9365 block_end = (i + 3 +
9366 (vpd_data[i + 1] +
9367 (vpd_data[i + 2] << 8)));
9368 i += 3;
9369 while (i < block_end) {
9370 if (vpd_data[i + 0] == 'P' &&
9371 vpd_data[i + 1] == 'N') {
9372 int partno_len = vpd_data[i + 2];
9373
9374 if (partno_len > 24)
9375 goto out_not_found;
9376
9377 memcpy(tp->board_part_number,
9378 &vpd_data[i + 3],
9379 partno_len);
9380
9381 /* Success. */
9382 return;
9383 }
9384 }
9385
9386 /* Part number not found. */
9387 goto out_not_found;
9388 }
9389
9390out_not_found:
9391 strcpy(tp->board_part_number, "none");
9392}
9393
9394#ifdef CONFIG_SPARC64
9395static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9396{
9397 struct pci_dev *pdev = tp->pdev;
9398 struct pcidev_cookie *pcp = pdev->sysdata;
9399
9400 if (pcp != NULL) {
9401 int node = pcp->prom_node;
9402 u32 venid;
9403 int err;
9404
9405 err = prom_getproperty(node, "subsystem-vendor-id",
9406 (char *) &venid, sizeof(venid));
9407 if (err == 0 || err == -1)
9408 return 0;
9409 if (venid == PCI_VENDOR_ID_SUN)
9410 return 1;
9411 }
9412 return 0;
9413}
9414#endif
9415
9416static int __devinit tg3_get_invariants(struct tg3 *tp)
9417{
9418 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009419 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9420 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
Michael Chan399de502005-10-03 14:02:39 -07009421 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9422 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009423 { },
9424 };
9425 u32 misc_ctrl_reg;
9426 u32 cacheline_sz_reg;
9427 u32 pci_state_reg, grc_misc_cfg;
9428 u32 val;
9429 u16 pci_cmd;
9430 int err;
9431
9432#ifdef CONFIG_SPARC64
9433 if (tg3_is_sun_570X(tp))
9434 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9435#endif
9436
Linus Torvalds1da177e2005-04-16 15:20:36 -07009437 /* Force memory write invalidate off. If we leave it on,
9438 * then on 5700_BX chips we have to enable a workaround.
9439 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9440	 * to match the cacheline size.  The Broadcom driver has this
9441	 * workaround but turns MWI off all the time, so it never uses
9442	 * it.  This seems to suggest that the workaround is insufficient.
9443 */
9444 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9445 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9446 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9447
9448 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9449 * has the register indirect write enable bit set before
9450 * we try to access any of the MMIO registers. It is also
9451 * critical that the PCI-X hw workaround situation is decided
9452 * before that as well.
9453 */
9454 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9455 &misc_ctrl_reg);
9456
9457 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9458 MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				u8 rev;

				pci_read_config_byte(bridge, PCI_REVISION_ID,
						     &rev);
				if (rev > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}
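	/* TG3_FLG2_ICH_WORKAROUND is consumed further down: it switches all
	 * register and mailbox accessors to the indirect config-cycle
	 * variants and clears PCI_COMMAND_MEMORY, so the chip is never
	 * touched through MMIO on these systems.
	 */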

	/* Find msi capability. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	}

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
	tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
	tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
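	/* The dword decoded above follows the usual PCI layout: byte 0 is
	 * the cache line size (in 32-bit words, per PCI convention), byte 1
	 * the latency timer, byte 2 the header type and byte 3 the BIST
	 * field. For example, a raw value of 0x00004010 decodes to a
	 * latency timer of 0x40 and a cache line size of 0x10 dwords
	 * (64 bytes).
	 */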

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;

	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
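	/* TG3_FLAG_MBOX_WRITE_REORDER is consumed in the accessor setup
	 * below, where the TX/RX mailbox write methods are switched to the
	 * flushing variants so that mailbox writes reach the chip in order.
	 */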

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
		cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
		cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;
			u16 pci_cmd;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs. See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
		tp->write32 = tg3_write_flush_reg32;

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
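	/* The function pointers above form a small dispatch layer: the hot
	 * paths always go through tp->read32/tp->write32 and the mailbox
	 * variants, so each PCI/PCI-X quirk only has to swap in a different
	 * accessor here instead of sprinkling conditionals per access.
	 */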

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
	tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
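	/* ETH_DATA_LEN is 1500, so any MTU above the standard Ethernet
	 * payload size enables the jumbo RX ring here; 5780-class chips are
	 * deliberately excluded from this flag.
	 */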

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}
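	/* Note that the SRAM readback probe above only runs when the chip is
	 * operating in PCI-X mode and the target workaround has not already
	 * been forced on by the 5700_BX check earlier; if the zero written
	 * to the first word does not read back, writes are being corrupted
	 * and the indirect-write workaround is enabled.
	 */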

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
#if 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}
#endif
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* It seems all chips can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	tp->dev->hard_start_xmit = tg3_start_xmit;

	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	/* By default, disable wake-on-lan. User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;

	return err;
}

#ifdef CONFIG_SPARC64
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;

		if (prom_getproplen(node, "local-mac-address") == 6) {
			prom_getproperty(node, "local-mac-address",
					 dev->dev_addr, 6);
			memcpy(dev->perm_addr, dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	     !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;
	}
	/* Next, try NVRAM. */
	else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
		 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
		dev->dev_addr[0] = ((hi >> 16) & 0xff);
		dev->dev_addr[1] = ((hi >> 24) & 0xff);
		dev->dev_addr[2] = ((lo >> 0) & 0xff);
		dev->dev_addr[3] = ((lo >> 8) & 0xff);
		dev->dev_addr[4] = ((lo >> 16) & 0xff);
		dev->dev_addr[5] = ((lo >> 24) & 0xff);
	}
	/* Finally just fetch it out of the MAC control regs. */
	else {
		hi = tr32(MAC_ADDR_0_HIGH);
		lo = tr32(MAC_ADDR_0_LOW);

		dev->dev_addr[5] = lo & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[1] = hi & 0xff;
		dev->dev_addr[0] = (hi >> 8) & 0xff;
	}
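	/* Worked example for the mailbox path above: hi = 0x484b0011 and
	 * lo = 0x22334455 decode to the address 00:11:22:33:44:55; the
	 * 0x484b pattern in the upper half of the high word acts as a
	 * validity signature for the mailbox contents.
	 */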

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}
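	/* Example: on a sparc64 host with 64-byte cache lines and the chip in
	 * conventional PCI mode, goal is BOUNDARY_SINGLE_CACHELINE and the
	 * final switch above selects the 64-byte read and write boundary
	 * bits, so DMA bursts stop at each cache-line boundary.
	 */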

out:
	return val;
}

static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

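	/* The loop below copies the test descriptor into NIC SRAM one 32-bit
	 * word at a time through the PCI memory window registers, then the
	 * window base is cleared again so later SRAM accesses start from a
	 * known state.
	 */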
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

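	/* The loop below is the actual test: fill the buffer with a known
	 * pattern, DMA it to NIC SRAM, DMA it back, and verify the contents.
	 * On a mismatch the write boundary is tightened to 16 bytes and the
	 * whole sequence is retried; a clean pass breaks out of the loop.
	 */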
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	netif_carrier_off(tp->dev);
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400: return "5400";
	case PHY_ID_BCM5401: return "5401";
	case PHY_ID_BCM5411: return "5411";
	case PHY_ID_BCM5701: return "5701";
	case PHY_ID_BCM5703: return "5703";
	case PHY_ID_BCM5704: return "5704";
	case PHY_ID_BCM5705: return "5705";
	case PHY_ID_BCM5750: return "5750";
	case PHY_ID_BCM5752: return "5752";
	case PHY_ID_BCM5714: return "5714";
	case PHY_ID_BCM5780: return "5780";
	case PHY_ID_BCM8002: return "8002/serdes";
	case 0: return "serdes";
	default: return "unknown";
	};
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
		else if (clock_ctrl == 7)
			strcat(str, "133MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pci_using_dac, pm_cap;
	char str[40];

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Configure DMA attributes. */
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err < 0) {
			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
			       "for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is off by default, user can enable using ethtool. */
#if 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		dev->features |= NETIF_F_TSO;
#endif

#endif

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test below will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		dev->features &= ~NETIF_F_HIGHDMA;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space. We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
	       dev->name, tp->dma_rwctrl);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		flush_scheduled_work();
		unregister_netdev(dev);
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);