/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.59"
#define DRV_MODULE_RELDATE	"June 8, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
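/* Editor's note (illustrative, not from the original source): because the
 * ring sizes above and below are compile-time power-of-two constants, an
 * expression such as 'idx % TG3_TX_RING_SIZE' can be compiled down to
 * 'idx & (TG3_TX_RING_SIZE - 1)', a single AND instead of a divide.  The
 * TX_BUFFS_AVAIL() and NEXT_TX() macros below use the '& (size - 1)' form
 * explicitly.
 */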
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)						\
	((TP)->tx_pending -						\
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
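
/* Editor's note: _tw32_flush() is not called directly; the tw32_f() and
 * tw32_wait_f() macros defined below wrap it.  For example, the clock
 * switching code later in this file uses
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) so that the 40 usec
 * settling time is honored on both the posted and non-posted paths.
 */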

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
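
/* Editor's note: driver code goes through the tw32()/tr32() family of
 * macros rather than calling writel()/readl() directly.  The macros
 * dispatch through the tp->write32/tp->read32 (and *_mbox) function
 * pointers, so per-chipset workarounds such as indirect register access
 * or the mailbox write-reorder flush are applied in one place.  A typical
 * call is tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl |
 * MISC_HOST_CTRL_MASK_PCI_INT), as used in tg3_disable_ints() below.
 */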

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY. */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       tp->dev->name,
		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
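
/* Editor's note: a compact summary of the pause resolution implemented
 * above when TG3_FLAG_PAUSE_AUTONEG is set (local / remote advertisement
 * -> result):
 *
 *   PAUSE_CAP + PAUSE_ASYM / remote PAUSE_CAP          -> RX and TX pause
 *   PAUSE_CAP + PAUSE_ASYM / remote PAUSE_ASYM only    -> RX pause only
 *   PAUSE_CAP only         / remote PAUSE_CAP          -> RX and TX pause
 *   PAUSE_ASYM only        / remote PAUSE_CAP + ASYM   -> TX pause only
 *
 * Any other combination leaves both directions of flow control disabled.
 */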
1494
1495static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496{
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF:
1499 *speed = SPEED_10;
1500 *duplex = DUPLEX_HALF;
1501 break;
1502
1503 case MII_TG3_AUX_STAT_10FULL:
1504 *speed = SPEED_10;
1505 *duplex = DUPLEX_FULL;
1506 break;
1507
1508 case MII_TG3_AUX_STAT_100HALF:
1509 *speed = SPEED_100;
1510 *duplex = DUPLEX_HALF;
1511 break;
1512
1513 case MII_TG3_AUX_STAT_100FULL:
1514 *speed = SPEED_100;
1515 *duplex = DUPLEX_FULL;
1516 break;
1517
1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF;
1521 break;
1522
1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL;
1526 break;
1527
1528 default:
1529 *speed = SPEED_INVALID;
1530 *duplex = DUPLEX_INVALID;
1531 break;
1532 };
1533}
1534
1535static void tg3_phy_copper_begin(struct tg3 *tp)
1536{
1537 u32 new_adv;
1538 int i;
1539
1540 if (tp->link_config.phy_is_low_power) {
1541 /* Entering low power mode. Disable gigabit and
1542 * 100baseT advertisements.
1543 */
1544 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545
1546 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1547 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1548 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1549 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550
1551 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552 } else if (tp->link_config.speed == SPEED_INVALID) {
1553 tp->link_config.advertising =
1554 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1555 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1556 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1557 ADVERTISED_Autoneg | ADVERTISED_MII);
1558
1559 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1560 tp->link_config.advertising &=
1561 ~(ADVERTISED_1000baseT_Half |
1562 ADVERTISED_1000baseT_Full);
1563
1564 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1565 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1566 new_adv |= ADVERTISE_10HALF;
1567 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1568 new_adv |= ADVERTISE_10FULL;
1569 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1570 new_adv |= ADVERTISE_100HALF;
1571 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1572 new_adv |= ADVERTISE_100FULL;
1573 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575 if (tp->link_config.advertising &
1576 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577 new_adv = 0;
1578 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1579 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1580 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1581 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1582 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1583 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1584 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1585 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1586 MII_TG3_CTRL_ENABLE_AS_MASTER);
1587 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588 } else {
1589 tg3_writephy(tp, MII_TG3_CTRL, 0);
1590 }
1591 } else {
1592 /* Asking for a specific link mode. */
1593 if (tp->link_config.speed == SPEED_1000) {
1594 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596
1597 if (tp->link_config.duplex == DUPLEX_FULL)
1598 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599 else
1600 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1601 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1602 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1603 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1604 MII_TG3_CTRL_ENABLE_AS_MASTER);
1605 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606 } else {
1607 tg3_writephy(tp, MII_TG3_CTRL, 0);
1608
1609 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1610 if (tp->link_config.speed == SPEED_100) {
1611 if (tp->link_config.duplex == DUPLEX_FULL)
1612 new_adv |= ADVERTISE_100FULL;
1613 else
1614 new_adv |= ADVERTISE_100HALF;
1615 } else {
1616 if (tp->link_config.duplex == DUPLEX_FULL)
1617 new_adv |= ADVERTISE_10FULL;
1618 else
1619 new_adv |= ADVERTISE_10HALF;
1620 }
1621 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1622 }
1623 }
1624
1625 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1626 tp->link_config.speed != SPEED_INVALID) {
1627 u32 bmcr, orig_bmcr;
1628
1629 tp->link_config.active_speed = tp->link_config.speed;
1630 tp->link_config.active_duplex = tp->link_config.duplex;
1631
1632 bmcr = 0;
1633 switch (tp->link_config.speed) {
1634 default:
1635 case SPEED_10:
1636 break;
1637
1638 case SPEED_100:
1639 bmcr |= BMCR_SPEED100;
1640 break;
1641
1642 case SPEED_1000:
1643 bmcr |= TG3_BMCR_SPEED1000;
1644 break;
1645 };
1646
1647 if (tp->link_config.duplex == DUPLEX_FULL)
1648 bmcr |= BMCR_FULLDPLX;
1649
1650 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1651 (bmcr != orig_bmcr)) {
1652 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1653 for (i = 0; i < 1500; i++) {
1654 u32 tmp;
1655
1656 udelay(10);
1657 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1658 tg3_readphy(tp, MII_BMSR, &tmp))
1659 continue;
1660 if (!(tmp & BMSR_LSTATUS)) {
1661 udelay(40);
1662 break;
1663 }
1664 }
1665 tg3_writephy(tp, MII_BMCR, bmcr);
1666 udelay(40);
1667 }
1668 } else {
1669 tg3_writephy(tp, MII_BMCR,
1670 BMCR_ANENABLE | BMCR_ANRESTART);
1671 }
1672}
1673
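/* BCM5401 PHY DSP fixups: turn off tap power management and set the
 * extended packet length bit through the DSP address/data registers.
 */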
1674static int tg3_init_5401phy_dsp(struct tg3 *tp)
1675{
1676 int err;
1677
1678 /* Turn off tap power management. */
1679 /* Set Extended packet length bit */
1680 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681
1682 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684
1685 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687
1688 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690
1691 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693
1694 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1696
1697 udelay(40);
1698
1699 return err;
1700}
1701
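/* Return 1 if the PHY is currently advertising all 10/100 modes (and,
 * unless the board is 10/100-only, both 1000baseT modes as well),
 * 0 otherwise.
 */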
1702static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703{
1704 u32 adv_reg, all_mask;
1705
1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707 return 0;
1708
1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710 ADVERTISE_100HALF | ADVERTISE_100FULL);
1711 if ((adv_reg & all_mask) != all_mask)
1712 return 0;
1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714 u32 tg3_ctrl;
1715
1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717 return 0;
1718
1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720 MII_TG3_CTRL_ADV_1000_FULL);
1721 if ((tg3_ctrl & all_mask) != all_mask)
1722 return 0;
1723 }
1724 return 1;
1725}
1726
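/* Bring up (or re-validate) the link on a copper PHY: clear stale MAC
 * status, optionally reset the PHY, wait for BMSR to report link, resolve
 * flow control from the advertisement registers, and finally program
 * MAC_MODE / MAC_EVENT to match the negotiated speed and duplex.
 */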
1727static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728{
1729 int current_link_up;
1730 u32 bmsr, dummy;
1731 u16 current_speed;
1732 u8 current_duplex;
1733 int i, err;
1734
1735 tw32(MAC_EVENT, 0);
1736
1737 tw32_f(MAC_STATUS,
1738 (MAC_STATUS_SYNC_CHANGED |
1739 MAC_STATUS_CFG_CHANGED |
1740 MAC_STATUS_MI_COMPLETION |
1741 MAC_STATUS_LNKSTATE_CHANGED));
1742 udelay(40);
1743
1744 tp->mi_mode = MAC_MI_MODE_BASE;
1745 tw32_f(MAC_MI_MODE, tp->mi_mode);
1746 udelay(80);
1747
1748 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749
1750 /* Some third-party PHYs need to be reset on link going
1751 * down.
1752 */
1753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756 netif_carrier_ok(tp->dev)) {
1757 tg3_readphy(tp, MII_BMSR, &bmsr);
1758 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759 !(bmsr & BMSR_LSTATUS))
1760 force_reset = 1;
1761 }
1762 if (force_reset)
1763 tg3_phy_reset(tp);
1764
1765 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766 tg3_readphy(tp, MII_BMSR, &bmsr);
1767 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1769 bmsr = 0;
1770
1771 if (!(bmsr & BMSR_LSTATUS)) {
1772 err = tg3_init_5401phy_dsp(tp);
1773 if (err)
1774 return err;
1775
1776 tg3_readphy(tp, MII_BMSR, &bmsr);
1777 for (i = 0; i < 1000; i++) {
1778 udelay(10);
1779 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780 (bmsr & BMSR_LSTATUS)) {
1781 udelay(40);
1782 break;
1783 }
1784 }
1785
1786 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787 !(bmsr & BMSR_LSTATUS) &&
1788 tp->link_config.active_speed == SPEED_1000) {
1789 err = tg3_phy_reset(tp);
1790 if (!err)
1791 err = tg3_init_5401phy_dsp(tp);
1792 if (err)
1793 return err;
1794 }
1795 }
1796 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798 /* 5701 {A0,B0} CRC bug workaround */
1799 tg3_writephy(tp, 0x15, 0x0a75);
1800 tg3_writephy(tp, 0x1c, 0x8c68);
1801 tg3_writephy(tp, 0x1c, 0x8d68);
1802 tg3_writephy(tp, 0x1c, 0x8c68);
1803 }
1804
1805 /* Clear pending interrupts... */
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808
1809 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811 else
1812 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813
1814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819 else
1820 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1821 }
1822
1823 current_link_up = 0;
1824 current_speed = SPEED_INVALID;
1825 current_duplex = DUPLEX_INVALID;
1826
1827 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1828 u32 val;
1829
1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832 if (!(val & (1 << 10))) {
1833 val |= (1 << 10);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1835 goto relink;
1836 }
1837 }
1838
1839 bmsr = 0;
1840 for (i = 0; i < 100; i++) {
1841 tg3_readphy(tp, MII_BMSR, &bmsr);
1842 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843 (bmsr & BMSR_LSTATUS))
1844 break;
1845 udelay(40);
1846 }
1847
1848 if (bmsr & BMSR_LSTATUS) {
1849 u32 aux_stat, bmcr;
1850
1851 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852 for (i = 0; i < 2000; i++) {
1853 udelay(10);
1854 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1855 aux_stat)
1856 break;
1857 }
1858
1859 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1860 &current_speed,
1861 &current_duplex);
1862
1863 bmcr = 0;
1864 for (i = 0; i < 200; i++) {
1865 tg3_readphy(tp, MII_BMCR, &bmcr);
1866 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867 continue;
1868 if (bmcr && bmcr != 0x7fff)
1869 break;
1870 udelay(10);
1871 }
1872
1873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874 if (bmcr & BMCR_ANENABLE) {
1875 current_link_up = 1;
1876
1877 /* Force autoneg restart if we are exiting
1878 * low power mode.
1879 */
1880 if (!tg3_copper_is_advertising_all(tp))
1881 current_link_up = 0;
1882 } else {
1883 current_link_up = 0;
1884 }
1885 } else {
1886 if (!(bmcr & BMCR_ANENABLE) &&
1887 tp->link_config.speed == current_speed &&
1888 tp->link_config.duplex == current_duplex) {
1889 current_link_up = 1;
1890 } else {
1891 current_link_up = 0;
1892 }
1893 }
1894
1895 tp->link_config.active_speed = current_speed;
1896 tp->link_config.active_duplex = current_duplex;
1897 }
1898
1899 if (current_link_up == 1 &&
1900 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902 u32 local_adv, remote_adv;
1903
1904 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905 local_adv = 0;
1906 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907
1908 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1909 remote_adv = 0;
1910
1911 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912
1913 /* If we are not advertising full pause capability,
1914 * something is wrong. Bring the link down and reconfigure.
1915 */
1916 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917 current_link_up = 0;
1918 } else {
1919 tg3_setup_flow_control(tp, local_adv, remote_adv);
1920 }
1921 }
1922relink:
Michael Chan6921d202005-12-13 21:15:53 -08001923 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 u32 tmp;
1925
1926 tg3_phy_copper_begin(tp);
1927
1928 tg3_readphy(tp, MII_BMSR, &tmp);
1929 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930 (tmp & BMSR_LSTATUS))
1931 current_link_up = 1;
1932 }
1933
1934 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935 if (current_link_up == 1) {
1936 if (tp->link_config.active_speed == SPEED_100 ||
1937 tp->link_config.active_speed == SPEED_10)
1938 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939 else
1940 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941 } else
1942 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943
1944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945 if (tp->link_config.active_duplex == DUPLEX_HALF)
1946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947
1948 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951 (current_link_up == 1 &&
1952 tp->link_config.active_speed == SPEED_10))
1953 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954 } else {
1955 if (current_link_up == 1)
1956 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1957 }
1958
1959 /* ??? Without this setting Netgear GA302T PHY does not
1960 * ??? send/receive packets...
1961 */
1962 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965 tw32_f(MAC_MI_MODE, tp->mi_mode);
1966 udelay(80);
1967 }
1968
1969 tw32_f(MAC_MODE, tp->mac_mode);
1970 udelay(40);
1971
1972 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973 /* Polled via timer. */
1974 tw32_f(MAC_EVENT, 0);
1975 } else {
1976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1977 }
1978 udelay(40);
1979
1980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981 current_link_up == 1 &&
1982 tp->link_config.active_speed == SPEED_1000 &&
1983 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1985 udelay(120);
1986 tw32_f(MAC_STATUS,
1987 (MAC_STATUS_SYNC_CHANGED |
1988 MAC_STATUS_CFG_CHANGED));
1989 udelay(40);
1990 tg3_write_mem(tp,
1991 NIC_SRAM_FIRMWARE_MBOX,
1992 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1993 }
1994
1995 if (current_link_up != netif_carrier_ok(tp->dev)) {
1996 if (current_link_up)
1997 netif_carrier_on(tp->dev);
1998 else
1999 netif_carrier_off(tp->dev);
2000 tg3_link_report(tp);
2001 }
2002
2003 return 0;
2004}
2005
2006struct tg3_fiber_aneginfo {
2007 int state;
2008#define ANEG_STATE_UNKNOWN 0
2009#define ANEG_STATE_AN_ENABLE 1
2010#define ANEG_STATE_RESTART_INIT 2
2011#define ANEG_STATE_RESTART 3
2012#define ANEG_STATE_DISABLE_LINK_OK 4
2013#define ANEG_STATE_ABILITY_DETECT_INIT 5
2014#define ANEG_STATE_ABILITY_DETECT 6
2015#define ANEG_STATE_ACK_DETECT_INIT 7
2016#define ANEG_STATE_ACK_DETECT 8
2017#define ANEG_STATE_COMPLETE_ACK_INIT 9
2018#define ANEG_STATE_COMPLETE_ACK 10
2019#define ANEG_STATE_IDLE_DETECT_INIT 11
2020#define ANEG_STATE_IDLE_DETECT 12
2021#define ANEG_STATE_LINK_OK 13
2022#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2023#define ANEG_STATE_NEXT_PAGE_WAIT 15
2024
2025 u32 flags;
2026#define MR_AN_ENABLE 0x00000001
2027#define MR_RESTART_AN 0x00000002
2028#define MR_AN_COMPLETE 0x00000004
2029#define MR_PAGE_RX 0x00000008
2030#define MR_NP_LOADED 0x00000010
2031#define MR_TOGGLE_TX 0x00000020
2032#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2033#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2034#define MR_LP_ADV_SYM_PAUSE 0x00000100
2035#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2036#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2037#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2038#define MR_LP_ADV_NEXT_PAGE 0x00001000
2039#define MR_TOGGLE_RX 0x00002000
2040#define MR_NP_RX 0x00004000
2041
2042#define MR_LINK_OK 0x80000000
2043
2044 unsigned long link_time, cur_time;
2045
2046 u32 ability_match_cfg;
2047 int ability_match_count;
2048
2049 char ability_match, idle_match, ack_match;
2050
2051 u32 txconfig, rxconfig;
2052#define ANEG_CFG_NP 0x00000080
2053#define ANEG_CFG_ACK 0x00000040
2054#define ANEG_CFG_RF2 0x00000020
2055#define ANEG_CFG_RF1 0x00000010
2056#define ANEG_CFG_PS2 0x00000001
2057#define ANEG_CFG_PS1 0x00008000
2058#define ANEG_CFG_HD 0x00004000
2059#define ANEG_CFG_FD 0x00002000
2060#define ANEG_CFG_INVAL 0x00001f06
2061
2062};
2063#define ANEG_OK 0
2064#define ANEG_DONE 1
2065#define ANEG_TIMER_ENAB 2
2066#define ANEG_FAILED -1
2067
2068#define ANEG_STATE_SETTLE_TIME 10000
2069
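/* One step of the software 1000BASE-X autonegotiation state machine
 * (roughly the IEEE 802.3 clause 37 arbitration flow).  The caller,
 * fiber_autoneg() below, invokes this once per polling tick; the return
 * value requests another tick, reports completion, or reports failure.
 */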
2070static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2071 struct tg3_fiber_aneginfo *ap)
2072{
2073 unsigned long delta;
2074 u32 rx_cfg_reg;
2075 int ret;
2076
2077 if (ap->state == ANEG_STATE_UNKNOWN) {
2078 ap->rxconfig = 0;
2079 ap->link_time = 0;
2080 ap->cur_time = 0;
2081 ap->ability_match_cfg = 0;
2082 ap->ability_match_count = 0;
2083 ap->ability_match = 0;
2084 ap->idle_match = 0;
2085 ap->ack_match = 0;
2086 }
2087 ap->cur_time++;
2088
2089 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2090 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091
2092 if (rx_cfg_reg != ap->ability_match_cfg) {
2093 ap->ability_match_cfg = rx_cfg_reg;
2094 ap->ability_match = 0;
2095 ap->ability_match_count = 0;
2096 } else {
2097 if (++ap->ability_match_count > 1) {
2098 ap->ability_match = 1;
2099 ap->ability_match_cfg = rx_cfg_reg;
2100 }
2101 }
2102 if (rx_cfg_reg & ANEG_CFG_ACK)
2103 ap->ack_match = 1;
2104 else
2105 ap->ack_match = 0;
2106
2107 ap->idle_match = 0;
2108 } else {
2109 ap->idle_match = 1;
2110 ap->ability_match_cfg = 0;
2111 ap->ability_match_count = 0;
2112 ap->ability_match = 0;
2113 ap->ack_match = 0;
2114
2115 rx_cfg_reg = 0;
2116 }
2117
2118 ap->rxconfig = rx_cfg_reg;
2119 ret = ANEG_OK;
2120
2121 switch(ap->state) {
2122 case ANEG_STATE_UNKNOWN:
2123 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2124 ap->state = ANEG_STATE_AN_ENABLE;
2125
2126 /* fallthru */
2127 case ANEG_STATE_AN_ENABLE:
2128 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2129 if (ap->flags & MR_AN_ENABLE) {
2130 ap->link_time = 0;
2131 ap->cur_time = 0;
2132 ap->ability_match_cfg = 0;
2133 ap->ability_match_count = 0;
2134 ap->ability_match = 0;
2135 ap->idle_match = 0;
2136 ap->ack_match = 0;
2137
2138 ap->state = ANEG_STATE_RESTART_INIT;
2139 } else {
2140 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2141 }
2142 break;
2143
2144 case ANEG_STATE_RESTART_INIT:
2145 ap->link_time = ap->cur_time;
2146 ap->flags &= ~(MR_NP_LOADED);
2147 ap->txconfig = 0;
2148 tw32(MAC_TX_AUTO_NEG, 0);
2149 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150 tw32_f(MAC_MODE, tp->mac_mode);
2151 udelay(40);
2152
2153 ret = ANEG_TIMER_ENAB;
2154 ap->state = ANEG_STATE_RESTART;
2155
2156 /* fallthru */
2157 case ANEG_STATE_RESTART:
2158 delta = ap->cur_time - ap->link_time;
2159 if (delta > ANEG_STATE_SETTLE_TIME) {
2160 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161 } else {
2162 ret = ANEG_TIMER_ENAB;
2163 }
2164 break;
2165
2166 case ANEG_STATE_DISABLE_LINK_OK:
2167 ret = ANEG_DONE;
2168 break;
2169
2170 case ANEG_STATE_ABILITY_DETECT_INIT:
2171 ap->flags &= ~(MR_TOGGLE_TX);
2172 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2173 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2174 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2175 tw32_f(MAC_MODE, tp->mac_mode);
2176 udelay(40);
2177
2178 ap->state = ANEG_STATE_ABILITY_DETECT;
2179 break;
2180
2181 case ANEG_STATE_ABILITY_DETECT:
2182 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2183 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2184 }
2185 break;
2186
2187 case ANEG_STATE_ACK_DETECT_INIT:
2188 ap->txconfig |= ANEG_CFG_ACK;
2189 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2190 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2191 tw32_f(MAC_MODE, tp->mac_mode);
2192 udelay(40);
2193
2194 ap->state = ANEG_STATE_ACK_DETECT;
2195
2196 /* fallthru */
2197 case ANEG_STATE_ACK_DETECT:
2198 if (ap->ack_match != 0) {
2199 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2200 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2201 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202 } else {
2203 ap->state = ANEG_STATE_AN_ENABLE;
2204 }
2205 } else if (ap->ability_match != 0 &&
2206 ap->rxconfig == 0) {
2207 ap->state = ANEG_STATE_AN_ENABLE;
2208 }
2209 break;
2210
2211 case ANEG_STATE_COMPLETE_ACK_INIT:
2212 if (ap->rxconfig & ANEG_CFG_INVAL) {
2213 ret = ANEG_FAILED;
2214 break;
2215 }
2216 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2217 MR_LP_ADV_HALF_DUPLEX |
2218 MR_LP_ADV_SYM_PAUSE |
2219 MR_LP_ADV_ASYM_PAUSE |
2220 MR_LP_ADV_REMOTE_FAULT1 |
2221 MR_LP_ADV_REMOTE_FAULT2 |
2222 MR_LP_ADV_NEXT_PAGE |
2223 MR_TOGGLE_RX |
2224 MR_NP_RX);
2225 if (ap->rxconfig & ANEG_CFG_FD)
2226 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2227 if (ap->rxconfig & ANEG_CFG_HD)
2228 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2229 if (ap->rxconfig & ANEG_CFG_PS1)
2230 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2231 if (ap->rxconfig & ANEG_CFG_PS2)
2232 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2233 if (ap->rxconfig & ANEG_CFG_RF1)
2234 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2235 if (ap->rxconfig & ANEG_CFG_RF2)
2236 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2237 if (ap->rxconfig & ANEG_CFG_NP)
2238 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239
2240 ap->link_time = ap->cur_time;
2241
2242 ap->flags ^= (MR_TOGGLE_TX);
2243 if (ap->rxconfig & 0x0008)
2244 ap->flags |= MR_TOGGLE_RX;
2245 if (ap->rxconfig & ANEG_CFG_NP)
2246 ap->flags |= MR_NP_RX;
2247 ap->flags |= MR_PAGE_RX;
2248
2249 ap->state = ANEG_STATE_COMPLETE_ACK;
2250 ret = ANEG_TIMER_ENAB;
2251 break;
2252
2253 case ANEG_STATE_COMPLETE_ACK:
2254 if (ap->ability_match != 0 &&
2255 ap->rxconfig == 0) {
2256 ap->state = ANEG_STATE_AN_ENABLE;
2257 break;
2258 }
2259 delta = ap->cur_time - ap->link_time;
2260 if (delta > ANEG_STATE_SETTLE_TIME) {
2261 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2262 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263 } else {
2264 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2265 !(ap->flags & MR_NP_RX)) {
2266 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2267 } else {
2268 ret = ANEG_FAILED;
2269 }
2270 }
2271 }
2272 break;
2273
2274 case ANEG_STATE_IDLE_DETECT_INIT:
2275 ap->link_time = ap->cur_time;
2276 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2277 tw32_f(MAC_MODE, tp->mac_mode);
2278 udelay(40);
2279
2280 ap->state = ANEG_STATE_IDLE_DETECT;
2281 ret = ANEG_TIMER_ENAB;
2282 break;
2283
2284 case ANEG_STATE_IDLE_DETECT:
2285 if (ap->ability_match != 0 &&
2286 ap->rxconfig == 0) {
2287 ap->state = ANEG_STATE_AN_ENABLE;
2288 break;
2289 }
2290 delta = ap->cur_time - ap->link_time;
2291 if (delta > ANEG_STATE_SETTLE_TIME) {
2292 /* XXX another gem from the Broadcom driver :( */
2293 ap->state = ANEG_STATE_LINK_OK;
2294 }
2295 break;
2296
2297 case ANEG_STATE_LINK_OK:
2298 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2299 ret = ANEG_DONE;
2300 break;
2301
2302 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2303 /* ??? unimplemented */
2304 break;
2305
2306 case ANEG_STATE_NEXT_PAGE_WAIT:
2307 /* ??? unimplemented */
2308 break;
2309
2310 default:
2311 ret = ANEG_FAILED;
2312 break;
2313 };
2314
2315 return ret;
2316}
2317
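/* Drive the state machine above until it reports ANEG_DONE or
 * ANEG_FAILED, bounded at roughly 200ms of 1us polling.  Returns 1 when
 * autonegotiation completed with a usable link partner and passes the
 * negotiated MR_* flags back through *flags.
 */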
2318static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319{
2320 int res = 0;
2321 struct tg3_fiber_aneginfo aninfo;
2322 int status = ANEG_FAILED;
2323 unsigned int tick;
2324 u32 tmp;
2325
2326 tw32_f(MAC_TX_AUTO_NEG, 0);
2327
2328 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330 udelay(40);
2331
2332 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333 udelay(40);
2334
2335 memset(&aninfo, 0, sizeof(aninfo));
2336 aninfo.flags |= MR_AN_ENABLE;
2337 aninfo.state = ANEG_STATE_UNKNOWN;
2338 aninfo.cur_time = 0;
2339 tick = 0;
2340 while (++tick < 195000) {
2341 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342 if (status == ANEG_DONE || status == ANEG_FAILED)
2343 break;
2344
2345 udelay(1);
2346 }
2347
2348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349 tw32_f(MAC_MODE, tp->mac_mode);
2350 udelay(40);
2351
2352 *flags = aninfo.flags;
2353
2354 if (status == ANEG_DONE &&
2355 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356 MR_LP_ADV_FULL_DUPLEX)))
2357 res = 1;
2358
2359 return res;
2360}
2361
2362static void tg3_init_bcm8002(struct tg3 *tp)
2363{
2364 u32 mac_status = tr32(MAC_STATUS);
2365 int i;
2366
 2367	/* Reset when initializing the first time or when we have a link. */
2368 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369 !(mac_status & MAC_STATUS_PCS_SYNCED))
2370 return;
2371
2372 /* Set PLL lock range. */
2373 tg3_writephy(tp, 0x16, 0x8007);
2374
2375 /* SW reset */
2376 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377
2378 /* Wait for reset to complete. */
2379 /* XXX schedule_timeout() ... */
2380 for (i = 0; i < 500; i++)
2381 udelay(10);
2382
2383 /* Config mode; select PMA/Ch 1 regs. */
2384 tg3_writephy(tp, 0x10, 0x8411);
2385
2386 /* Enable auto-lock and comdet, select txclk for tx. */
2387 tg3_writephy(tp, 0x11, 0x0a10);
2388
2389 tg3_writephy(tp, 0x18, 0x00a0);
2390 tg3_writephy(tp, 0x16, 0x41ff);
2391
2392 /* Assert and deassert POR. */
2393 tg3_writephy(tp, 0x13, 0x0400);
2394 udelay(40);
2395 tg3_writephy(tp, 0x13, 0x0000);
2396
2397 tg3_writephy(tp, 0x11, 0x0a50);
2398 udelay(40);
2399 tg3_writephy(tp, 0x11, 0x0a10);
2400
2401 /* Wait for signal to stabilize */
2402 /* XXX schedule_timeout() ... */
2403 for (i = 0; i < 15000; i++)
2404 udelay(10);
2405
2406 /* Deselect the channel register so we can read the PHYID
2407 * later.
2408 */
2409 tg3_writephy(tp, 0x10, 0x8011);
2410}
2411
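/* Fiber link bring-up using the on-chip SG_DIG hardware autoneg block.
 * Returns nonzero when the link should be considered up; flow control is
 * resolved from the SG_DIG_STATUS partner bits, with a parallel-detection
 * fallback when PCS is synced but no config code words are received.
 */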
2412static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413{
2414 u32 sg_dig_ctrl, sg_dig_status;
2415 u32 serdes_cfg, expected_sg_dig_ctrl;
2416 int workaround, port_a;
2417 int current_link_up;
2418
2419 serdes_cfg = 0;
2420 expected_sg_dig_ctrl = 0;
2421 workaround = 0;
2422 port_a = 1;
2423 current_link_up = 0;
2424
2425 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427 workaround = 1;
2428 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2429 port_a = 0;
2430
2431 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2432 /* preserve bits 20-23 for voltage regulator */
2433 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2434 }
2435
2436 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437
2438 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2439 if (sg_dig_ctrl & (1 << 31)) {
2440 if (workaround) {
2441 u32 val = serdes_cfg;
2442
2443 if (port_a)
2444 val |= 0xc010000;
2445 else
2446 val |= 0x4010000;
2447 tw32_f(MAC_SERDES_CFG, val);
2448 }
2449 tw32_f(SG_DIG_CTRL, 0x01388400);
2450 }
2451 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452 tg3_setup_flow_control(tp, 0, 0);
2453 current_link_up = 1;
2454 }
2455 goto out;
2456 }
2457
2458 /* Want auto-negotiation. */
2459 expected_sg_dig_ctrl = 0x81388400;
2460
2461 /* Pause capability */
2462 expected_sg_dig_ctrl |= (1 << 11);
2463
 2464	/* Asymmetric pause */
2465 expected_sg_dig_ctrl |= (1 << 12);
2466
2467 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2468 if (workaround)
2469 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2470 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471 udelay(5);
2472 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473
2474 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476 MAC_STATUS_SIGNAL_DET)) {
2477 int i;
2478
 2479		/* Give it time to negotiate (~200ms) */
2480 for (i = 0; i < 40000; i++) {
2481 sg_dig_status = tr32(SG_DIG_STATUS);
2482 if (sg_dig_status & (0x3))
2483 break;
2484 udelay(5);
2485 }
2486 mac_status = tr32(MAC_STATUS);
2487
2488 if ((sg_dig_status & (1 << 1)) &&
2489 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490 u32 local_adv, remote_adv;
2491
2492 local_adv = ADVERTISE_PAUSE_CAP;
2493 remote_adv = 0;
2494 if (sg_dig_status & (1 << 19))
2495 remote_adv |= LPA_PAUSE_CAP;
2496 if (sg_dig_status & (1 << 20))
2497 remote_adv |= LPA_PAUSE_ASYM;
2498
2499 tg3_setup_flow_control(tp, local_adv, remote_adv);
2500 current_link_up = 1;
2501 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502 } else if (!(sg_dig_status & (1 << 1))) {
2503 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2505 else {
2506 if (workaround) {
2507 u32 val = serdes_cfg;
2508
2509 if (port_a)
2510 val |= 0xc010000;
2511 else
2512 val |= 0x4010000;
2513
2514 tw32_f(MAC_SERDES_CFG, val);
2515 }
2516
2517 tw32_f(SG_DIG_CTRL, 0x01388400);
2518 udelay(40);
2519
2520 /* Link parallel detection - link is up */
2521 /* only if we have PCS_SYNC and not */
2522 /* receiving config code words */
2523 mac_status = tr32(MAC_STATUS);
2524 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526 tg3_setup_flow_control(tp, 0, 0);
2527 current_link_up = 1;
2528 }
2529 }
2530 }
2531 }
2532
2533out:
2534 return current_link_up;
2535}
2536
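/* Fiber link bring-up without the hardware autoneg block: run the
 * software state machine via fiber_autoneg(), or simply force a 1000FD
 * link when autoneg is disabled.  Returns nonzero on link up.
 */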
2537static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538{
2539 int current_link_up = 0;
2540
2541 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2542 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2543 goto out;
2544 }
2545
2546 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2547 u32 flags;
2548 int i;
2549
2550 if (fiber_autoneg(tp, &flags)) {
2551 u32 local_adv, remote_adv;
2552
2553 local_adv = ADVERTISE_PAUSE_CAP;
2554 remote_adv = 0;
2555 if (flags & MR_LP_ADV_SYM_PAUSE)
2556 remote_adv |= LPA_PAUSE_CAP;
2557 if (flags & MR_LP_ADV_ASYM_PAUSE)
2558 remote_adv |= LPA_PAUSE_ASYM;
2559
2560 tg3_setup_flow_control(tp, local_adv, remote_adv);
2561
2562 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2563 current_link_up = 1;
2564 }
2565 for (i = 0; i < 30; i++) {
2566 udelay(20);
2567 tw32_f(MAC_STATUS,
2568 (MAC_STATUS_SYNC_CHANGED |
2569 MAC_STATUS_CFG_CHANGED));
2570 udelay(40);
2571 if ((tr32(MAC_STATUS) &
2572 (MAC_STATUS_SYNC_CHANGED |
2573 MAC_STATUS_CFG_CHANGED)) == 0)
2574 break;
2575 }
2576
2577 mac_status = tr32(MAC_STATUS);
2578 if (current_link_up == 0 &&
2579 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2580 !(mac_status & MAC_STATUS_RCVD_CFG))
2581 current_link_up = 1;
2582 } else {
2583 /* Forcing 1000FD link up. */
2584 current_link_up = 1;
2585 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586
2587 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2588 udelay(40);
2589 }
2590
2591out:
2592 return current_link_up;
2593}
2594
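/* Top-level fiber (TBI) link setup: bail out early if the link already
 * looks good, otherwise run hardware or by-hand autoneg, then update the
 * LED controls and the carrier state, reporting any link change.
 */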
2595static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2596{
2597 u32 orig_pause_cfg;
2598 u16 orig_active_speed;
2599 u8 orig_active_duplex;
2600 u32 mac_status;
2601 int current_link_up;
2602 int i;
2603
2604 orig_pause_cfg =
2605 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2606 TG3_FLAG_TX_PAUSE));
2607 orig_active_speed = tp->link_config.active_speed;
2608 orig_active_duplex = tp->link_config.active_duplex;
2609
2610 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2611 netif_carrier_ok(tp->dev) &&
2612 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2613 mac_status = tr32(MAC_STATUS);
2614 mac_status &= (MAC_STATUS_PCS_SYNCED |
2615 MAC_STATUS_SIGNAL_DET |
2616 MAC_STATUS_CFG_CHANGED |
2617 MAC_STATUS_RCVD_CFG);
2618 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2619 MAC_STATUS_SIGNAL_DET)) {
2620 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2621 MAC_STATUS_CFG_CHANGED));
2622 return 0;
2623 }
2624 }
2625
2626 tw32_f(MAC_TX_AUTO_NEG, 0);
2627
2628 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2629 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2630 tw32_f(MAC_MODE, tp->mac_mode);
2631 udelay(40);
2632
2633 if (tp->phy_id == PHY_ID_BCM8002)
2634 tg3_init_bcm8002(tp);
2635
2636 /* Enable link change event even when serdes polling. */
2637 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2638 udelay(40);
2639
2640 current_link_up = 0;
2641 mac_status = tr32(MAC_STATUS);
2642
2643 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2644 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645 else
2646 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647
2648 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2649 tw32_f(MAC_MODE, tp->mac_mode);
2650 udelay(40);
2651
2652 tp->hw_status->status =
2653 (SD_STATUS_UPDATED |
2654 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655
2656 for (i = 0; i < 100; i++) {
2657 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2658 MAC_STATUS_CFG_CHANGED));
2659 udelay(5);
2660 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2661 MAC_STATUS_CFG_CHANGED)) == 0)
2662 break;
2663 }
2664
2665 mac_status = tr32(MAC_STATUS);
2666 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2667 current_link_up = 0;
2668 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2669 tw32_f(MAC_MODE, (tp->mac_mode |
2670 MAC_MODE_SEND_CONFIGS));
2671 udelay(1);
2672 tw32_f(MAC_MODE, tp->mac_mode);
2673 }
2674 }
2675
2676 if (current_link_up == 1) {
2677 tp->link_config.active_speed = SPEED_1000;
2678 tp->link_config.active_duplex = DUPLEX_FULL;
2679 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2680 LED_CTRL_LNKLED_OVERRIDE |
2681 LED_CTRL_1000MBPS_ON));
2682 } else {
2683 tp->link_config.active_speed = SPEED_INVALID;
2684 tp->link_config.active_duplex = DUPLEX_INVALID;
2685 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2686 LED_CTRL_LNKLED_OVERRIDE |
2687 LED_CTRL_TRAFFIC_OVERRIDE));
2688 }
2689
2690 if (current_link_up != netif_carrier_ok(tp->dev)) {
2691 if (current_link_up)
2692 netif_carrier_on(tp->dev);
2693 else
2694 netif_carrier_off(tp->dev);
2695 tg3_link_report(tp);
2696 } else {
2697 u32 now_pause_cfg =
2698 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699 TG3_FLAG_TX_PAUSE);
2700 if (orig_pause_cfg != now_pause_cfg ||
2701 orig_active_speed != tp->link_config.active_speed ||
2702 orig_active_duplex != tp->link_config.active_duplex)
2703 tg3_link_report(tp);
2704 }
2705
2706 return 0;
2707}
2708
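/* Link setup for serdes devices that expose an MII register interface;
 * the 1000BASE-X advertisement lives in the ADVERTISE_1000X* bits rather
 * than the copper ADVERTISE_* bits used by tg3_setup_copper_phy().
 */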
Michael Chan747e8f82005-07-25 12:33:22 -07002709static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710{
2711 int current_link_up, err = 0;
2712 u32 bmsr, bmcr;
2713 u16 current_speed;
2714 u8 current_duplex;
2715
2716 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2717 tw32_f(MAC_MODE, tp->mac_mode);
2718 udelay(40);
2719
2720 tw32(MAC_EVENT, 0);
2721
2722 tw32_f(MAC_STATUS,
2723 (MAC_STATUS_SYNC_CHANGED |
2724 MAC_STATUS_CFG_CHANGED |
2725 MAC_STATUS_MI_COMPLETION |
2726 MAC_STATUS_LNKSTATE_CHANGED));
2727 udelay(40);
2728
2729 if (force_reset)
2730 tg3_phy_reset(tp);
2731
2732 current_link_up = 0;
2733 current_speed = SPEED_INVALID;
2734 current_duplex = DUPLEX_INVALID;
2735
2736 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08002738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2739 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2740 bmsr |= BMSR_LSTATUS;
2741 else
2742 bmsr &= ~BMSR_LSTATUS;
2743 }
Michael Chan747e8f82005-07-25 12:33:22 -07002744
2745 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746
2747 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2748 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2749 /* do nothing, just check for link up at the end */
2750 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2751 u32 adv, new_adv;
2752
2753 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2754 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2755 ADVERTISE_1000XPAUSE |
2756 ADVERTISE_1000XPSE_ASYM |
2757 ADVERTISE_SLCT);
2758
2759 /* Always advertise symmetric PAUSE just like copper */
2760 new_adv |= ADVERTISE_1000XPAUSE;
2761
2762 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2763 new_adv |= ADVERTISE_1000XHALF;
2764 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2765 new_adv |= ADVERTISE_1000XFULL;
2766
2767 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2768 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2769 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2770 tg3_writephy(tp, MII_BMCR, bmcr);
2771
2772 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2773 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2774 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2775
2776 return err;
2777 }
2778 } else {
2779 u32 new_bmcr;
2780
2781 bmcr &= ~BMCR_SPEED1000;
2782 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783
2784 if (tp->link_config.duplex == DUPLEX_FULL)
2785 new_bmcr |= BMCR_FULLDPLX;
2786
2787 if (new_bmcr != bmcr) {
2788 /* BMCR_SPEED1000 is a reserved bit that needs
2789 * to be set on write.
2790 */
2791 new_bmcr |= BMCR_SPEED1000;
2792
2793 /* Force a linkdown */
2794 if (netif_carrier_ok(tp->dev)) {
2795 u32 adv;
2796
2797 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2798 adv &= ~(ADVERTISE_1000XFULL |
2799 ADVERTISE_1000XHALF |
2800 ADVERTISE_SLCT);
2801 tg3_writephy(tp, MII_ADVERTISE, adv);
2802 tg3_writephy(tp, MII_BMCR, bmcr |
2803 BMCR_ANRESTART |
2804 BMCR_ANENABLE);
2805 udelay(10);
2806 netif_carrier_off(tp->dev);
2807 }
2808 tg3_writephy(tp, MII_BMCR, new_bmcr);
2809 bmcr = new_bmcr;
2810 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08002812 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813 ASIC_REV_5714) {
2814 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2815 bmsr |= BMSR_LSTATUS;
2816 else
2817 bmsr &= ~BMSR_LSTATUS;
2818 }
Michael Chan747e8f82005-07-25 12:33:22 -07002819 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2820 }
2821 }
2822
2823 if (bmsr & BMSR_LSTATUS) {
2824 current_speed = SPEED_1000;
2825 current_link_up = 1;
2826 if (bmcr & BMCR_FULLDPLX)
2827 current_duplex = DUPLEX_FULL;
2828 else
2829 current_duplex = DUPLEX_HALF;
2830
2831 if (bmcr & BMCR_ANENABLE) {
2832 u32 local_adv, remote_adv, common;
2833
2834 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2835 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2836 common = local_adv & remote_adv;
2837 if (common & (ADVERTISE_1000XHALF |
2838 ADVERTISE_1000XFULL)) {
2839 if (common & ADVERTISE_1000XFULL)
2840 current_duplex = DUPLEX_FULL;
2841 else
2842 current_duplex = DUPLEX_HALF;
2843
2844 tg3_setup_flow_control(tp, local_adv,
2845 remote_adv);
2846 }
2847 else
2848 current_link_up = 0;
2849 }
2850 }
2851
2852 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2853 if (tp->link_config.active_duplex == DUPLEX_HALF)
2854 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855
2856 tw32_f(MAC_MODE, tp->mac_mode);
2857 udelay(40);
2858
2859 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860
2861 tp->link_config.active_speed = current_speed;
2862 tp->link_config.active_duplex = current_duplex;
2863
2864 if (current_link_up != netif_carrier_ok(tp->dev)) {
2865 if (current_link_up)
2866 netif_carrier_on(tp->dev);
2867 else {
2868 netif_carrier_off(tp->dev);
2869 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870 }
2871 tg3_link_report(tp);
2872 }
2873 return err;
2874}
2875
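/* Periodic parallel-detection check for MII serdes: if autoneg never
 * completes but we see signal detect and no config code words, force a
 * 1000FD link; if config code words show up later, turn autoneg back on.
 */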
2876static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877{
2878 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2879 /* Give autoneg time to complete. */
2880 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2881 return;
2882 }
2883 if (!netif_carrier_ok(tp->dev) &&
2884 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2885 u32 bmcr;
2886
2887 tg3_readphy(tp, MII_BMCR, &bmcr);
2888 if (bmcr & BMCR_ANENABLE) {
2889 u32 phy1, phy2;
2890
2891 /* Select shadow register 0x1f */
2892 tg3_writephy(tp, 0x1c, 0x7c00);
2893 tg3_readphy(tp, 0x1c, &phy1);
2894
2895 /* Select expansion interrupt status register */
2896 tg3_writephy(tp, 0x17, 0x0f01);
2897 tg3_readphy(tp, 0x15, &phy2);
2898 tg3_readphy(tp, 0x15, &phy2);
2899
2900 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 2901				/* We have signal detect and are not receiving
 2902				 * config code words, so the link is up by parallel
2903 * detection.
2904 */
2905
2906 bmcr &= ~BMCR_ANENABLE;
2907 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2908 tg3_writephy(tp, MII_BMCR, bmcr);
2909 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2910 }
2911 }
2912 }
2913 else if (netif_carrier_ok(tp->dev) &&
2914 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2915 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2916 u32 phy2;
2917
2918 /* Select expansion interrupt status register */
2919 tg3_writephy(tp, 0x17, 0x0f01);
2920 tg3_readphy(tp, 0x15, &phy2);
2921 if (phy2 & 0x20) {
2922 u32 bmcr;
2923
2924 /* Config code words received, turn on autoneg. */
2925 tg3_readphy(tp, MII_BMCR, &bmcr);
2926 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927
2928 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2929
2930 }
2931 }
2932}
2933
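/* Dispatch link setup to the fiber, MII-serdes or copper handler, then
 * adjust the MAC transmit timings (MAC_TX_LENGTHS) and statistics-block
 * coalescing to match the resulting link state.
 */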
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2935{
2936 int err;
2937
2938 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07002940 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942 } else {
2943 err = tg3_setup_copper_phy(tp, force_reset);
2944 }
2945
2946 if (tp->link_config.active_speed == SPEED_1000 &&
2947 tp->link_config.active_duplex == DUPLEX_HALF)
2948 tw32(MAC_TX_LENGTHS,
2949 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950 (6 << TX_LENGTHS_IPG_SHIFT) |
2951 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952 else
2953 tw32(MAC_TX_LENGTHS,
2954 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955 (6 << TX_LENGTHS_IPG_SHIFT) |
2956 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957
2958 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959 if (netif_carrier_ok(tp->dev)) {
2960 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07002961 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 } else {
2963 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2964 }
2965 }
2966
2967 return err;
2968}
2969
Michael Chandf3e6542006-05-26 17:48:07 -07002970/* This is called whenever we suspect that the system chipset is re-
2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972 * is bogus tx completions. We try to recover by setting the
2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2974 * in the workqueue.
2975 */
2976static void tg3_tx_recover(struct tg3 *tp)
2977{
2978 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980
2981 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982 "mapped I/O cycles to the network device, attempting to "
2983 "recover. Please report the problem to the driver maintainer "
2984 "and include system chipset information.\n", tp->dev->name);
2985
2986 spin_lock(&tp->lock);
2987 spin_lock(&tp->tx_lock);
2988 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2989 spin_unlock(&tp->tx_lock);
2990 spin_unlock(&tp->lock);
2991}
2992
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993/* Tigon3 never reports partial packet sends. So we do not
2994 * need special logic to handle SKBs that have not had all
2995 * of their frags sent yet, like SunGEM does.
2996 */
2997static void tg3_tx(struct tg3 *tp)
2998{
2999 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3000 u32 sw_idx = tp->tx_cons;
3001
3002 while (sw_idx != hw_idx) {
3003 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3004 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003005 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006
Michael Chandf3e6542006-05-26 17:48:07 -07003007 if (unlikely(skb == NULL)) {
3008 tg3_tx_recover(tp);
3009 return;
3010 }
3011
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 pci_unmap_single(tp->pdev,
3013 pci_unmap_addr(ri, mapping),
3014 skb_headlen(skb),
3015 PCI_DMA_TODEVICE);
3016
3017 ri->skb = NULL;
3018
3019 sw_idx = NEXT_TX(sw_idx);
3020
3021 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003023 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3024 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025
3026 pci_unmap_page(tp->pdev,
3027 pci_unmap_addr(ri, mapping),
3028 skb_shinfo(skb)->frags[i].size,
3029 PCI_DMA_TODEVICE);
3030
3031 sw_idx = NEXT_TX(sw_idx);
3032 }
3033
David S. Millerf47c11e2005-06-24 20:18:35 -07003034 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003035
3036 if (unlikely(tx_bug)) {
3037 tg3_tx_recover(tp);
3038 return;
3039 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 }
3041
3042 tp->tx_cons = sw_idx;
3043
Michael Chan51b91462005-09-01 17:41:28 -07003044 if (unlikely(netif_queue_stopped(tp->dev))) {
3045 spin_lock(&tp->tx_lock);
3046 if (netif_queue_stopped(tp->dev) &&
3047 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3048 netif_wake_queue(tp->dev);
3049 spin_unlock(&tp->tx_lock);
3050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051}
3052
3053/* Returns size of skb allocated or < 0 on error.
3054 *
3055 * We only need to fill in the address because the other members
3056 * of the RX descriptor are invariant, see tg3_init_rings.
3057 *
 3058 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3059 * posting buffers we only dirty the first cache line of the RX
3060 * descriptor (containing the address). Whereas for the RX status
3061 * buffers the cpu only reads the last cacheline of the RX descriptor
3062 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3063 */
3064static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3065 int src_idx, u32 dest_idx_unmasked)
3066{
3067 struct tg3_rx_buffer_desc *desc;
3068 struct ring_info *map, *src_map;
3069 struct sk_buff *skb;
3070 dma_addr_t mapping;
3071 int skb_size, dest_idx;
3072
3073 src_map = NULL;
3074 switch (opaque_key) {
3075 case RXD_OPAQUE_RING_STD:
3076 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3077 desc = &tp->rx_std[dest_idx];
3078 map = &tp->rx_std_buffers[dest_idx];
3079 if (src_idx >= 0)
3080 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003081 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082 break;
3083
3084 case RXD_OPAQUE_RING_JUMBO:
3085 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3086 desc = &tp->rx_jumbo[dest_idx];
3087 map = &tp->rx_jumbo_buffers[dest_idx];
3088 if (src_idx >= 0)
3089 src_map = &tp->rx_jumbo_buffers[src_idx];
3090 skb_size = RX_JUMBO_PKT_BUF_SZ;
3091 break;
3092
3093 default:
3094 return -EINVAL;
3095 };
3096
3097 /* Do not overwrite any of the map or rp information
3098 * until we are sure we can commit to a new buffer.
3099 *
3100 * Callers depend upon this behavior and assume that
3101 * we leave everything unchanged if we fail.
3102 */
3103 skb = dev_alloc_skb(skb_size);
3104 if (skb == NULL)
3105 return -ENOMEM;
3106
3107 skb->dev = tp->dev;
3108 skb_reserve(skb, tp->rx_offset);
3109
3110 mapping = pci_map_single(tp->pdev, skb->data,
3111 skb_size - tp->rx_offset,
3112 PCI_DMA_FROMDEVICE);
3113
3114 map->skb = skb;
3115 pci_unmap_addr_set(map, mapping, mapping);
3116
3117 if (src_map != NULL)
3118 src_map->skb = NULL;
3119
3120 desc->addr_hi = ((u64)mapping >> 32);
3121 desc->addr_lo = ((u64)mapping & 0xffffffff);
3122
3123 return skb_size;
3124}
3125
 3126/* We only need to move over the address because the other
3127 * members of the RX descriptor are invariant. See notes above
3128 * tg3_alloc_rx_skb for full details.
3129 */
3130static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3131 int src_idx, u32 dest_idx_unmasked)
3132{
3133 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3134 struct ring_info *src_map, *dest_map;
3135 int dest_idx;
3136
3137 switch (opaque_key) {
3138 case RXD_OPAQUE_RING_STD:
3139 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3140 dest_desc = &tp->rx_std[dest_idx];
3141 dest_map = &tp->rx_std_buffers[dest_idx];
3142 src_desc = &tp->rx_std[src_idx];
3143 src_map = &tp->rx_std_buffers[src_idx];
3144 break;
3145
3146 case RXD_OPAQUE_RING_JUMBO:
3147 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3148 dest_desc = &tp->rx_jumbo[dest_idx];
3149 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3150 src_desc = &tp->rx_jumbo[src_idx];
3151 src_map = &tp->rx_jumbo_buffers[src_idx];
3152 break;
3153
3154 default:
3155 return;
3156 };
3157
3158 dest_map->skb = src_map->skb;
3159 pci_unmap_addr_set(dest_map, mapping,
3160 pci_unmap_addr(src_map, mapping));
3161 dest_desc->addr_hi = src_desc->addr_hi;
3162 dest_desc->addr_lo = src_desc->addr_lo;
3163
3164 src_map->skb = NULL;
3165}
3166
3167#if TG3_VLAN_TAG_USED
3168static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3169{
3170 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3171}
3172#endif
3173
3174/* The RX ring scheme is composed of multiple rings which post fresh
3175 * buffers to the chip, and one special ring the chip uses to report
3176 * status back to the host.
3177 *
3178 * The special ring reports the status of received packets to the
3179 * host. The chip does not write into the original descriptor the
3180 * RX buffer was obtained from. The chip simply takes the original
3181 * descriptor as provided by the host, updates the status and length
3182 * field, then writes this into the next status ring entry.
3183 *
3184 * Each ring the host uses to post buffers to the chip is described
 3185 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3186 * it is first placed into the on-chip ram. When the packet's length
3187 * is known, it walks down the TG3_BDINFO entries to select the ring.
3188 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3189 * which is within the range of the new packet's length is chosen.
3190 *
3191 * The "separate ring for rx status" scheme may sound queer, but it makes
3192 * sense from a cache coherency perspective. If only the host writes
3193 * to the buffer post rings, and only the chip writes to the rx status
3194 * rings, then cache lines never move beyond shared-modified state.
3195 * If both the host and chip were to write into the same ring, cache line
3196 * eviction could occur since both entities want it in an exclusive state.
3197 */
3198static int tg3_rx(struct tg3 *tp, int budget)
3199{
3200 u32 work_mask;
Michael Chan483ba502005-04-25 15:14:03 -07003201 u32 sw_idx = tp->rx_rcb_ptr;
3202 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 int received;
3204
3205 hw_idx = tp->hw_status->idx[0].rx_producer;
3206 /*
3207 * We need to order the read of hw_idx and the read of
3208 * the opaque cookie.
3209 */
3210 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 work_mask = 0;
3212 received = 0;
3213 while (sw_idx != hw_idx && budget > 0) {
3214 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3215 unsigned int len;
3216 struct sk_buff *skb;
3217 dma_addr_t dma_addr;
3218 u32 opaque_key, desc_idx, *post_ptr;
3219
3220 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3221 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3222 if (opaque_key == RXD_OPAQUE_RING_STD) {
3223 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3224 mapping);
3225 skb = tp->rx_std_buffers[desc_idx].skb;
3226 post_ptr = &tp->rx_std_ptr;
3227 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3228 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3229 mapping);
3230 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3231 post_ptr = &tp->rx_jumbo_ptr;
3232 }
3233 else {
3234 goto next_pkt_nopost;
3235 }
3236
3237 work_mask |= opaque_key;
3238
3239 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3240 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3241 drop_it:
3242 tg3_recycle_rx(tp, opaque_key,
3243 desc_idx, *post_ptr);
3244 drop_it_no_recycle:
3245 /* Other statistics kept track of by card. */
3246 tp->net_stats.rx_dropped++;
3247 goto next_pkt;
3248 }
3249
3250 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3251
3252 if (len > RX_COPY_THRESHOLD
3253 && tp->rx_offset == 2
3254 /* rx_offset != 2 iff this is a 5701 card running
3255 * in PCI-X mode [see tg3_get_invariants()] */
3256 ) {
3257 int skb_size;
3258
3259 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3260 desc_idx, *post_ptr);
3261 if (skb_size < 0)
3262 goto drop_it;
3263
3264 pci_unmap_single(tp->pdev, dma_addr,
3265 skb_size - tp->rx_offset,
3266 PCI_DMA_FROMDEVICE);
3267
3268 skb_put(skb, len);
3269 } else {
3270 struct sk_buff *copy_skb;
3271
3272 tg3_recycle_rx(tp, opaque_key,
3273 desc_idx, *post_ptr);
3274
3275 copy_skb = dev_alloc_skb(len + 2);
3276 if (copy_skb == NULL)
3277 goto drop_it_no_recycle;
3278
3279 copy_skb->dev = tp->dev;
3280 skb_reserve(copy_skb, 2);
3281 skb_put(copy_skb, len);
3282 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3283 memcpy(copy_skb->data, skb->data, len);
3284 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3285
3286 /* We'll reuse the original ring buffer. */
3287 skb = copy_skb;
3288 }
3289
3290 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3291 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3292 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3293 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3294 skb->ip_summed = CHECKSUM_UNNECESSARY;
3295 else
3296 skb->ip_summed = CHECKSUM_NONE;
3297
3298 skb->protocol = eth_type_trans(skb, tp->dev);
3299#if TG3_VLAN_TAG_USED
3300 if (tp->vlgrp != NULL &&
3301 desc->type_flags & RXD_FLAG_VLAN) {
3302 tg3_vlan_rx(tp, skb,
3303 desc->err_vlan & RXD_VLAN_MASK);
3304 } else
3305#endif
3306 netif_receive_skb(skb);
3307
3308 tp->dev->last_rx = jiffies;
3309 received++;
3310 budget--;
3311
3312next_pkt:
3313 (*post_ptr)++;
3314next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003315 sw_idx++;
3316 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
Michael Chan52f6d692005-04-25 15:14:32 -07003317
3318 /* Refresh hw_idx to see if there is new work */
3319 if (sw_idx == hw_idx) {
3320 hw_idx = tp->hw_status->idx[0].rx_producer;
3321 rmb();
3322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 }
3324
3325 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003326 tp->rx_rcb_ptr = sw_idx;
3327 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328
3329 /* Refill RX ring(s). */
3330 if (work_mask & RXD_OPAQUE_RING_STD) {
3331 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3332 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3333 sw_idx);
3334 }
3335 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3336 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3337 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3338 sw_idx);
3339 }
3340 mmiowb();
3341
3342 return received;
3343}
3344
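/* NAPI poll callback: handle PHY events when link-change interrupts are
 * not in use, reap completed transmits, receive at most *budget packets,
 * and re-enable chip interrupts via tg3_restart_ints() once no work
 * remains.
 */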
3345static int tg3_poll(struct net_device *netdev, int *budget)
3346{
3347 struct tg3 *tp = netdev_priv(netdev);
3348 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 int done;
3350
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 /* handle link change and other phy events */
3352 if (!(tp->tg3_flags &
3353 (TG3_FLAG_USE_LINKCHG_REG |
3354 TG3_FLAG_POLL_SERDES))) {
3355 if (sblk->status & SD_STATUS_LINK_CHG) {
3356 sblk->status = SD_STATUS_UPDATED |
3357 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003358 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003360 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361 }
3362 }
3363
3364 /* run TX completion thread */
3365 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 tg3_tx(tp);
Michael Chandf3e6542006-05-26 17:48:07 -07003367 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3368 netif_rx_complete(netdev);
3369 schedule_work(&tp->reset_task);
3370 return 0;
3371 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 }
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 /* run RX thread, within the bounds set by NAPI.
3375 * All RX "locking" is done by ensuring outside
3376 * code synchronizes with dev->poll()
3377 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3379 int orig_budget = *budget;
3380 int work_done;
3381
3382 if (orig_budget > netdev->quota)
3383 orig_budget = netdev->quota;
3384
3385 work_done = tg3_rx(tp, orig_budget);
3386
3387 *budget -= work_done;
3388 netdev->quota -= work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 }
3390
Michael Chan38f38432005-09-05 17:53:32 -07003391 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
David S. Millerf7383c22005-05-18 22:50:53 -07003392 tp->last_tag = sblk->status_tag;
Michael Chan38f38432005-09-05 17:53:32 -07003393 rmb();
3394 } else
3395 sblk->status &= ~SD_STATUS_UPDATED;
David S. Millerf7383c22005-05-18 22:50:53 -07003396
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 /* if no more work, tell net stack and NIC we're done */
David S. Millerf7383c22005-05-18 22:50:53 -07003398 done = !tg3_has_work(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 if (done) {
David S. Millerf47c11e2005-06-24 20:18:35 -07003400 netif_rx_complete(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401 tg3_restart_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 }
3403
3404 return (done ? 0 : 1);
3405}
3406
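/* Mark the device as synchronizing and wait for an interrupt handler
 * already running on another CPU to finish; the handlers check
 * tg3_irq_sync() and bail out early once irq_sync is set.
 */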
David S. Millerf47c11e2005-06-24 20:18:35 -07003407static void tg3_irq_quiesce(struct tg3 *tp)
3408{
3409 BUG_ON(tp->irq_sync);
3410
3411 tp->irq_sync = 1;
3412 smp_mb();
3413
3414 synchronize_irq(tp->pdev->irq);
3415}
3416
3417static inline int tg3_irq_sync(struct tg3 *tp)
3418{
3419 return tp->irq_sync;
3420}
3421
3422/* Fully shutdown all tg3 driver activity elsewhere in the system.
3423 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 3424 * as well.  Most of the time, this is not necessary except when
3425 * shutting down the device.
3426 */
3427static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3428{
3429 if (irq_sync)
3430 tg3_irq_quiesce(tp);
3431 spin_lock_bh(&tp->lock);
3432 spin_lock(&tp->tx_lock);
3433}
3434
3435static inline void tg3_full_unlock(struct tg3 *tp)
3436{
3437 spin_unlock(&tp->tx_lock);
3438 spin_unlock_bh(&tp->lock);
3439}
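
/* Illustrative usage only (the real call sites are elsewhere in this
 * file): paths that reset or reprogram the chip typically do
 *
 *	tg3_full_lock(tp, 1);
 *	...reconfigure the hardware...
 *	tg3_full_unlock(tp);
 *
 * passing irq_sync == 1 so the interrupt handler is quiesced first,
 * while fast paths that only need mutual exclusion pass 0.
 */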
3440
Michael Chanfcfa0a32006-03-20 22:28:41 -08003441/* One-shot MSI handler - Chip automatically disables interrupt
3442 * after sending MSI so driver doesn't have to do it.
3443 */
3444static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3445{
3446 struct net_device *dev = dev_id;
3447 struct tg3 *tp = netdev_priv(dev);
3448
3449 prefetch(tp->hw_status);
3450 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3451
3452 if (likely(!tg3_irq_sync(tp)))
3453 netif_rx_schedule(dev); /* schedule NAPI poll */
3454
3455 return IRQ_HANDLED;
3456}
3457
Michael Chan88b06bc2005-04-21 17:13:25 -07003458/* MSI ISR - No need to check for interrupt sharing and no need to
3459 * flush status block and interrupt mailbox. PCI ordering rules
3460 * guarantee that MSI will arrive after the status block.
3461 */
3462static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3463{
3464 struct net_device *dev = dev_id;
3465 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07003466
Michael Chan61487482005-09-05 17:53:19 -07003467 prefetch(tp->hw_status);
3468 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07003469 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003470 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07003471 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003472 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07003473 * NIC to stop sending us irqs, engaging "in-intr-handler"
3474 * event coalescing.
3475 */
3476 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07003477 if (likely(!tg3_irq_sync(tp)))
Michael Chan88b06bc2005-04-21 17:13:25 -07003478 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003479
Michael Chan88b06bc2005-04-21 17:13:25 -07003480 return IRQ_RETVAL(1);
3481}
3482
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3484{
3485 struct net_device *dev = dev_id;
3486 struct tg3 *tp = netdev_priv(dev);
3487 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 unsigned int handled = 1;
3489
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 /* In INTx mode, it is possible for the interrupt to arrive at
3491 * the CPU before the status block that was posted prior to the interrupt.
3492 * Reading the PCI State register will confirm whether the
3493 * interrupt is ours and will flush the status block.
3494 */
3495 if ((sblk->status & SD_STATUS_UPDATED) ||
3496 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3497 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003498 * Writing any value to intr-mbox-0 clears PCI INTA# and
3499 * chip-internal interrupt pending events.
3500 * Writing non-zero to intr-mbox-0 additionally tells the
3501 * NIC to stop sending us irqs, engaging "in-intr-handler"
3502 * event coalescing.
3503 */
3504 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3505 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003506 if (tg3_irq_sync(tp))
3507 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003508 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan61487482005-09-05 17:53:19 -07003509 if (likely(tg3_has_work(tp))) {
3510 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
David S. Millerfac9b832005-05-18 22:46:34 -07003511 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003512 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07003513 /* No work, shared interrupt perhaps? re-enable
3514 * interrupts, and flush that PCI write
3515 */
Michael Chan09ee9292005-08-09 20:17:00 -07003516 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003517 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07003518 }
3519 } else { /* shared interrupt */
3520 handled = 0;
3521 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003522out:
David S. Millerfac9b832005-05-18 22:46:34 -07003523 return IRQ_RETVAL(handled);
3524}
3525
3526static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3527{
3528 struct net_device *dev = dev_id;
3529 struct tg3 *tp = netdev_priv(dev);
3530 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003531 unsigned int handled = 1;
3532
David S. Millerfac9b832005-05-18 22:46:34 -07003533 /* In INTx mode, it is possible for the interrupt to arrive at
3534 * the CPU before the status block that was posted prior to the interrupt.
3535 * Reading the PCI State register will confirm whether the
3536 * interrupt is ours and will flush the status block.
3537 */
Michael Chan38f38432005-09-05 17:53:32 -07003538 if ((sblk->status_tag != tp->last_tag) ||
David S. Millerfac9b832005-05-18 22:46:34 -07003539 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3540 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 * writing any value to intr-mbox-0 clears PCI INTA# and
3542 * chip-internal interrupt pending events.
3543 * writing non-zero to intr-mbox-0 additionally tells the
3544 * NIC to stop sending us irqs, engaging "in-intr-handler"
3545 * event coalescing.
3546 */
3547 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3548 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003549 if (tg3_irq_sync(tp))
3550 goto out;
Michael Chan38f38432005-09-05 17:53:32 -07003551 if (netif_rx_schedule_prep(dev)) {
Michael Chan61487482005-09-05 17:53:19 -07003552 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan38f38432005-09-05 17:53:32 -07003553 /* Update last_tag to mark that this status has been
3554 * seen. Because the interrupt may be shared, we may be
3555 * racing with tg3_poll(), so only update last_tag
3556 * if tg3_poll() is not scheduled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 */
Michael Chan38f38432005-09-05 17:53:32 -07003558 tp->last_tag = sblk->status_tag;
3559 __netif_rx_schedule(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 }
3561 } else { /* shared interrupt */
3562 handled = 0;
3563 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003564out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 return IRQ_RETVAL(handled);
3566}
3567
Michael Chan79381092005-04-21 17:13:59 -07003568/* ISR for interrupt test */
3569static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3570 struct pt_regs *regs)
3571{
3572 struct net_device *dev = dev_id;
3573 struct tg3 *tp = netdev_priv(dev);
3574 struct tg3_hw_status *sblk = tp->hw_status;
3575
Michael Chanf9804dd2005-09-27 12:13:10 -07003576 if ((sblk->status & SD_STATUS_UPDATED) ||
3577 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chan79381092005-04-21 17:13:59 -07003578 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3579 0x00000001);
3580 return IRQ_RETVAL(1);
3581 }
3582 return IRQ_RETVAL(0);
3583}
3584
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07003585static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07003586static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
3588#ifdef CONFIG_NET_POLL_CONTROLLER
3589static void tg3_poll_controller(struct net_device *dev)
3590{
Michael Chan88b06bc2005-04-21 17:13:25 -07003591 struct tg3 *tp = netdev_priv(dev);
3592
3593 tg3_interrupt(tp->pdev->irq, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594}
3595#endif
3596
3597static void tg3_reset_task(void *_data)
3598{
3599 struct tg3 *tp = _data;
3600 unsigned int restart_timer;
3601
Michael Chan7faa0062006-02-02 17:29:28 -08003602 tg3_full_lock(tp, 0);
3603 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3604
3605 if (!netif_running(tp->dev)) {
3606 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3607 tg3_full_unlock(tp);
3608 return;
3609 }
3610
3611 tg3_full_unlock(tp);
3612
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 tg3_netif_stop(tp);
3614
David S. Millerf47c11e2005-06-24 20:18:35 -07003615 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
3617 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3618 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3619
Michael Chandf3e6542006-05-26 17:48:07 -07003620 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3621 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3622 tp->write32_rx_mbox = tg3_write_flush_reg32;
3623 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3624 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3625 }
3626
Michael Chan944d9802005-05-29 14:57:48 -07003627 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07003628 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629
3630 tg3_netif_start(tp);
3631
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 if (restart_timer)
3633 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08003634
3635 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3636
3637 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638}
3639
3640static void tg3_tx_timeout(struct net_device *dev)
3641{
3642 struct tg3 *tp = netdev_priv(dev);
3643
3644 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3645 dev->name);
3646
3647 schedule_work(&tp->reset_task);
3648}
3649
Michael Chanc58ec932005-09-17 00:46:27 -07003650/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3651static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3652{
3653 u32 base = (u32) mapping & 0xffffffff;
3654
3655 return ((base > 0xffffdcc0) &&
3656 (base + len + 8 < base));
3657}
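
/* Worked example (illustrative numbers): with base = 0xffffff00 and
 * len = 0x200, base + len + 8 wraps to 0x00000108 in 32-bit arithmetic,
 * which is less than base, so the buffer straddles a 4GB boundary and
 * the caller must fall back to tigon3_dma_hwbug_workaround().  The
 * base > 0xffffdcc0 pre-check cheaply skips buffers that start more
 * than 0x2340 (9024) bytes below the boundary.
 */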
3658
Michael Chan72f2afb2006-03-06 19:28:35 -08003659/* Test for DMA addresses > 40-bit */
3660static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3661 int len)
3662{
3663#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08003664 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08003665 return (((u64) mapping + len) > DMA_40BIT_MASK);
3666 return 0;
3667#else
3668 return 0;
3669#endif
3670}
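
/* Worked example (illustrative numbers, only relevant when the
 * TG3_FLAG_40BIT_DMA_BUG flag is set on a 64-bit HIGHMEM build): for
 * mapping = 0xfffffff000 and len = 0x2000, the sum 0x10000001000
 * exceeds DMA_40BIT_MASK, so the caller uses the same
 * linearize-and-remap workaround as for 4GB boundary crossings.
 */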
3671
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3673
Michael Chan72f2afb2006-03-06 19:28:35 -08003674/* Workaround 4GB and 40-bit hardware DMA bugs. */
3675static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07003676 u32 last_plus_one, u32 *start,
3677 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678{
3679 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
Michael Chanc58ec932005-09-17 00:46:27 -07003680 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07003682 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
3684 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07003685 ret = -1;
3686 } else {
3687 /* New SKB is guaranteed to be linear. */
3688 entry = *start;
3689 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3690 PCI_DMA_TODEVICE);
3691 /* Make sure new skb does not cross any 4G boundaries.
3692 * Drop the packet if it does.
3693 */
3694 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3695 ret = -1;
3696 dev_kfree_skb(new_skb);
3697 new_skb = NULL;
3698 } else {
3699 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3700 base_flags, 1 | (mss << 1));
3701 *start = NEXT_TX(entry);
3702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 }
3704
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 /* Now clean up the sw ring entries. */
3706 i = 0;
3707 while (entry != last_plus_one) {
3708 int len;
3709
3710 if (i == 0)
3711 len = skb_headlen(skb);
3712 else
3713 len = skb_shinfo(skb)->frags[i-1].size;
3714 pci_unmap_single(tp->pdev,
3715 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3716 len, PCI_DMA_TODEVICE);
3717 if (i == 0) {
3718 tp->tx_buffers[entry].skb = new_skb;
3719 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3720 } else {
3721 tp->tx_buffers[entry].skb = NULL;
3722 }
3723 entry = NEXT_TX(entry);
3724 i++;
3725 }
3726
3727 dev_kfree_skb(skb);
3728
Michael Chanc58ec932005-09-17 00:46:27 -07003729 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730}
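
/* Summary of the workaround above (descriptive note): the offending skb
 * is linearized into a freshly allocated copy, that single buffer is
 * mapped and installed in the first software ring entry, and the ring
 * entries that had been set up for the original skb are unmapped and
 * cleared before the original is freed.  If the copy cannot be
 * allocated, or its mapping still crosses a 4GB boundary, -1 is
 * returned and the caller silently drops the packet.
 */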
3731
3732static void tg3_set_txd(struct tg3 *tp, int entry,
3733 dma_addr_t mapping, int len, u32 flags,
3734 u32 mss_and_is_end)
3735{
3736 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3737 int is_end = (mss_and_is_end & 0x1);
3738 u32 mss = (mss_and_is_end >> 1);
3739 u32 vlan_tag = 0;
3740
3741 if (is_end)
3742 flags |= TXD_FLAG_END;
3743 if (flags & TXD_FLAG_VLAN) {
3744 vlan_tag = flags >> 16;
3745 flags &= 0xffff;
3746 }
3747 vlan_tag |= (mss << TXD_MSS_SHIFT);
3748
3749 txd->addr_hi = ((u64) mapping >> 32);
3750 txd->addr_lo = ((u64) mapping & 0xffffffff);
3751 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3752 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3753}
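
/* Example of the resulting descriptor (illustrative): for the final
 * 1514-byte fragment of a plain packet (no VLAN tag, no TSO, and
 * ignoring any checksum bits the caller passed in base_flags), the
 * descriptor ends up as
 *
 *	txd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txd->vlan_tag  = 0;
 *
 * with addr_hi/addr_lo holding the upper and lower 32 bits of the DMA
 * address.
 */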
3754
Michael Chan5a6f3072006-03-20 22:28:05 -08003755/* hard_start_xmit for devices that don't have any bugs and
3756 * support TG3_FLG2_HW_TSO_2 only.
3757 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3759{
3760 struct tg3 *tp = netdev_priv(dev);
3761 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 u32 len, entry, base_flags, mss;
Michael Chan5a6f3072006-03-20 22:28:05 -08003763
3764 len = skb_headlen(skb);
3765
3766 /* No BH disabling for tx_lock here. We are running in BH disabled
3767 * context and TX reclaim runs via tp->poll inside of a software
3768 * interrupt. Furthermore, IRQ processing runs lockless so we have
3769 * no IRQ context deadlocks to worry about either. Rejoice!
3770 */
3771 if (!spin_trylock(&tp->tx_lock))
3772 return NETDEV_TX_LOCKED;
3773
3774 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3775 if (!netif_queue_stopped(dev)) {
3776 netif_stop_queue(dev);
3777
3778 /* This is a hard error, log it. */
3779 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3780 "queue awake!\n", dev->name);
3781 }
3782 spin_unlock(&tp->tx_lock);
3783 return NETDEV_TX_BUSY;
3784 }
3785
3786 entry = tp->tx_prod;
3787 base_flags = 0;
3788#if TG3_TSO_SUPPORT != 0
3789 mss = 0;
3790 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3791 (mss = skb_shinfo(skb)->tso_size) != 0) {
3792 int tcp_opt_len, ip_tcp_len;
3793
3794 if (skb_header_cloned(skb) &&
3795 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3796 dev_kfree_skb(skb);
3797 goto out_unlock;
3798 }
3799
3800 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3801 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3802
3803 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3804 TXD_FLAG_CPU_POST_DMA);
3805
3806 skb->nh.iph->check = 0;
3807 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3808
3809 skb->h.th->check = 0;
3810
3811 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3812 }
3813 else if (skb->ip_summed == CHECKSUM_HW)
3814 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3815#else
3816 mss = 0;
3817 if (skb->ip_summed == CHECKSUM_HW)
3818 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3819#endif
3820#if TG3_VLAN_TAG_USED
3821 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3822 base_flags |= (TXD_FLAG_VLAN |
3823 (vlan_tx_tag_get(skb) << 16));
3824#endif
3825
3826 /* Queue skb data, a.k.a. the main skb fragment. */
3827 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3828
3829 tp->tx_buffers[entry].skb = skb;
3830 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3831
3832 tg3_set_txd(tp, entry, mapping, len, base_flags,
3833 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3834
3835 entry = NEXT_TX(entry);
3836
3837 /* Now loop through additional data fragments, and queue them. */
3838 if (skb_shinfo(skb)->nr_frags > 0) {
3839 unsigned int i, last;
3840
3841 last = skb_shinfo(skb)->nr_frags - 1;
3842 for (i = 0; i <= last; i++) {
3843 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3844
3845 len = frag->size;
3846 mapping = pci_map_page(tp->pdev,
3847 frag->page,
3848 frag->page_offset,
3849 len, PCI_DMA_TODEVICE);
3850
3851 tp->tx_buffers[entry].skb = NULL;
3852 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3853
3854 tg3_set_txd(tp, entry, mapping, len,
3855 base_flags, (i == last) | (mss << 1));
3856
3857 entry = NEXT_TX(entry);
3858 }
3859 }
3860
3861 /* Packets are ready, update Tx producer idx local and on card. */
3862 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3863
3864 tp->tx_prod = entry;
3865 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3866 netif_stop_queue(dev);
3867 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3868 netif_wake_queue(tp->dev);
3869 }
3870
3871out_unlock:
3872 mmiowb();
3873 spin_unlock(&tp->tx_lock);
3874
3875 dev->trans_start = jiffies;
3876
3877 return NETDEV_TX_OK;
3878}
3879
3880/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3881 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3882 */
3883static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3884{
3885 struct tg3 *tp = netdev_priv(dev);
3886 dma_addr_t mapping;
3887 u32 len, entry, base_flags, mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889
3890 len = skb_headlen(skb);
3891
3892 /* No BH disabling for tx_lock here. We are running in BH disabled
3893 * context and TX reclaim runs via tp->poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07003894 * interrupt. Furthermore, IRQ processing runs lockless so we have
3895 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 */
David S. Millerf47c11e2005-06-24 20:18:35 -07003897 if (!spin_trylock(&tp->tx_lock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 return NETDEV_TX_LOCKED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08003901 if (!netif_queue_stopped(dev)) {
3902 netif_stop_queue(dev);
3903
3904 /* This is a hard error, log it. */
3905 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3906 "queue awake!\n", dev->name);
3907 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003908 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 return NETDEV_TX_BUSY;
3910 }
3911
3912 entry = tp->tx_prod;
3913 base_flags = 0;
3914 if (skb->ip_summed == CHECKSUM_HW)
3915 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3916#if TG3_TSO_SUPPORT != 0
3917 mss = 0;
3918 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3919 (mss = skb_shinfo(skb)->tso_size) != 0) {
3920 int tcp_opt_len, ip_tcp_len;
3921
3922 if (skb_header_cloned(skb) &&
3923 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3924 dev_kfree_skb(skb);
3925 goto out_unlock;
3926 }
3927
3928 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3929 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3930
3931 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3932 TXD_FLAG_CPU_POST_DMA);
3933
3934 skb->nh.iph->check = 0;
Alexey Dobriyanfd303332006-01-03 14:19:25 -08003935 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3937 skb->h.th->check = 0;
3938 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3939 }
3940 else {
3941 skb->h.th->check =
3942 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3943 skb->nh.iph->daddr,
3944 0, IPPROTO_TCP, 0);
3945 }
3946
3947 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3948 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3949 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3950 int tsflags;
3951
3952 tsflags = ((skb->nh.iph->ihl - 5) +
3953 (tcp_opt_len >> 2));
3954 mss |= (tsflags << 11);
3955 }
3956 } else {
3957 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3958 int tsflags;
3959
3960 tsflags = ((skb->nh.iph->ihl - 5) +
3961 (tcp_opt_len >> 2));
3962 base_flags |= tsflags << 12;
3963 }
3964 }
3965 }
3966#else
3967 mss = 0;
3968#endif
3969#if TG3_VLAN_TAG_USED
3970 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3971 base_flags |= (TXD_FLAG_VLAN |
3972 (vlan_tx_tag_get(skb) << 16));
3973#endif
3974
3975 /* Queue skb data, a.k.a. the main skb fragment. */
3976 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3977
3978 tp->tx_buffers[entry].skb = skb;
3979 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3980
3981 would_hit_hwbug = 0;
3982
3983 if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07003984 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985
3986 tg3_set_txd(tp, entry, mapping, len, base_flags,
3987 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3988
3989 entry = NEXT_TX(entry);
3990
3991 /* Now loop through additional data fragments, and queue them. */
3992 if (skb_shinfo(skb)->nr_frags > 0) {
3993 unsigned int i, last;
3994
3995 last = skb_shinfo(skb)->nr_frags - 1;
3996 for (i = 0; i <= last; i++) {
3997 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3998
3999 len = frag->size;
4000 mapping = pci_map_page(tp->pdev,
4001 frag->page,
4002 frag->page_offset,
4003 len, PCI_DMA_TODEVICE);
4004
4005 tp->tx_buffers[entry].skb = NULL;
4006 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4007
Michael Chanc58ec932005-09-17 00:46:27 -07004008 if (tg3_4g_overflow_test(mapping, len))
4009 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010
Michael Chan72f2afb2006-03-06 19:28:35 -08004011 if (tg3_40bit_overflow_test(tp, mapping, len))
4012 would_hit_hwbug = 1;
4013
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4015 tg3_set_txd(tp, entry, mapping, len,
4016 base_flags, (i == last)|(mss << 1));
4017 else
4018 tg3_set_txd(tp, entry, mapping, len,
4019 base_flags, (i == last));
4020
4021 entry = NEXT_TX(entry);
4022 }
4023 }
4024
4025 if (would_hit_hwbug) {
4026 u32 last_plus_one = entry;
4027 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028
Michael Chanc58ec932005-09-17 00:46:27 -07004029 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4030 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031
4032 /* If the workaround fails due to memory/mapping
4033 * failure, silently drop this packet.
4034 */
Michael Chan72f2afb2006-03-06 19:28:35 -08004035 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07004036 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 goto out_unlock;
4038
4039 entry = start;
4040 }
4041
4042 /* Packets are ready, update Tx producer idx local and on card. */
4043 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4044
4045 tp->tx_prod = entry;
Michael Chan51b91462005-09-01 17:41:28 -07004046 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 netif_stop_queue(dev);
Michael Chan51b91462005-09-01 17:41:28 -07004048 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4049 netif_wake_queue(tp->dev);
4050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
4052out_unlock:
4053 mmiowb();
David S. Millerf47c11e2005-06-24 20:18:35 -07004054 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
4056 dev->trans_start = jiffies;
4057
4058 return NETDEV_TX_OK;
4059}
4060
4061static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4062 int new_mtu)
4063{
4064 dev->mtu = new_mtu;
4065
Michael Chanef7f5ec2005-07-25 12:32:25 -07004066 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07004067 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07004068 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4069 ethtool_op_set_tso(dev, 0);
4070 }
4071 else
4072 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4073 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07004074 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07004075 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07004076 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07004077 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078}
4079
4080static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4081{
4082 struct tg3 *tp = netdev_priv(dev);
4083
4084 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4085 return -EINVAL;
4086
4087 if (!netif_running(dev)) {
4088 /* We'll just catch it later when the
4089 * device is brought up.
4090 */
4091 tg3_set_mtu(dev, tp, new_mtu);
4092 return 0;
4093 }
4094
4095 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004096
4097 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098
Michael Chan944d9802005-05-29 14:57:48 -07004099 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
4101 tg3_set_mtu(dev, tp, new_mtu);
4102
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004103 tg3_init_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
4105 tg3_netif_start(tp);
4106
David S. Millerf47c11e2005-06-24 20:18:35 -07004107 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108
4109 return 0;
4110}
4111
4112/* Free up pending packets in all rx/tx rings.
4113 *
4114 * The chip has been shut down and the driver detached from
4115 * the networking stack, so no interrupts or new tx packets will
4116 * end up in the driver. tp->{tx,}lock is not held and we are not
4117 * in an interrupt context and thus may sleep.
4118 */
4119static void tg3_free_rings(struct tg3 *tp)
4120{
4121 struct ring_info *rxp;
4122 int i;
4123
4124 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4125 rxp = &tp->rx_std_buffers[i];
4126
4127 if (rxp->skb == NULL)
4128 continue;
4129 pci_unmap_single(tp->pdev,
4130 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07004131 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 PCI_DMA_FROMDEVICE);
4133 dev_kfree_skb_any(rxp->skb);
4134 rxp->skb = NULL;
4135 }
4136
4137 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4138 rxp = &tp->rx_jumbo_buffers[i];
4139
4140 if (rxp->skb == NULL)
4141 continue;
4142 pci_unmap_single(tp->pdev,
4143 pci_unmap_addr(rxp, mapping),
4144 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4145 PCI_DMA_FROMDEVICE);
4146 dev_kfree_skb_any(rxp->skb);
4147 rxp->skb = NULL;
4148 }
4149
4150 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4151 struct tx_ring_info *txp;
4152 struct sk_buff *skb;
4153 int j;
4154
4155 txp = &tp->tx_buffers[i];
4156 skb = txp->skb;
4157
4158 if (skb == NULL) {
4159 i++;
4160 continue;
4161 }
4162
4163 pci_unmap_single(tp->pdev,
4164 pci_unmap_addr(txp, mapping),
4165 skb_headlen(skb),
4166 PCI_DMA_TODEVICE);
4167 txp->skb = NULL;
4168
4169 i++;
4170
4171 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4172 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4173 pci_unmap_page(tp->pdev,
4174 pci_unmap_addr(txp, mapping),
4175 skb_shinfo(skb)->frags[j].size,
4176 PCI_DMA_TODEVICE);
4177 i++;
4178 }
4179
4180 dev_kfree_skb_any(skb);
4181 }
4182}
4183
4184/* Initialize tx/rx rings for packet processing.
4185 *
4186 * The chip has been shut down and the driver detached from
4187 * the networking stack, so no interrupts or new tx packets will
4188 * end up in the driver. tp->{tx,}lock are held and thus
4189 * we may not sleep.
4190 */
4191static void tg3_init_rings(struct tg3 *tp)
4192{
4193 u32 i;
4194
4195 /* Free up all the SKBs. */
4196 tg3_free_rings(tp);
4197
4198 /* Zero out all descriptors. */
4199 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4200 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4201 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4202 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4203
Michael Chan7e72aad2005-07-25 12:31:17 -07004204 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07004205 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07004206 (tp->dev->mtu > ETH_DATA_LEN))
4207 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4208
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209	/* Initialize invariants of the rings; we only set this
4210 * stuff once. This works because the card does not
4211 * write into the rx buffer posting rings.
4212 */
4213 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4214 struct tg3_rx_buffer_desc *rxd;
4215
4216 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07004217 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 << RXD_LEN_SHIFT;
4219 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4220 rxd->opaque = (RXD_OPAQUE_RING_STD |
4221 (i << RXD_OPAQUE_INDEX_SHIFT));
4222 }
4223
Michael Chan0f893dc2005-07-25 12:30:38 -07004224 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4226 struct tg3_rx_buffer_desc *rxd;
4227
4228 rxd = &tp->rx_jumbo[i];
4229 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4230 << RXD_LEN_SHIFT;
4231 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4232 RXD_FLAG_JUMBO;
4233 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4234 (i << RXD_OPAQUE_INDEX_SHIFT));
4235 }
4236 }
4237
4238 /* Now allocate fresh SKBs for each rx ring. */
4239 for (i = 0; i < tp->rx_pending; i++) {
4240 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4241 -1, i) < 0)
4242 break;
4243 }
4244
Michael Chan0f893dc2005-07-25 12:30:38 -07004245 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4247 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4248 -1, i) < 0)
4249 break;
4250 }
4251 }
4252}
4253
4254/*
4255 * Must not be invoked with interrupt sources disabled and
4256 * the hardware shut down.
4257 */
4258static void tg3_free_consistent(struct tg3 *tp)
4259{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04004260 kfree(tp->rx_std_buffers);
4261 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262 if (tp->rx_std) {
4263 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4264 tp->rx_std, tp->rx_std_mapping);
4265 tp->rx_std = NULL;
4266 }
4267 if (tp->rx_jumbo) {
4268 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4269 tp->rx_jumbo, tp->rx_jumbo_mapping);
4270 tp->rx_jumbo = NULL;
4271 }
4272 if (tp->rx_rcb) {
4273 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4274 tp->rx_rcb, tp->rx_rcb_mapping);
4275 tp->rx_rcb = NULL;
4276 }
4277 if (tp->tx_ring) {
4278 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4279 tp->tx_ring, tp->tx_desc_mapping);
4280 tp->tx_ring = NULL;
4281 }
4282 if (tp->hw_status) {
4283 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4284 tp->hw_status, tp->status_mapping);
4285 tp->hw_status = NULL;
4286 }
4287 if (tp->hw_stats) {
4288 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4289 tp->hw_stats, tp->stats_mapping);
4290 tp->hw_stats = NULL;
4291 }
4292}
4293
4294/*
4295 * Must not be invoked with interrupt sources disabled and
4296 * the hardware shut down. Can sleep.
4297 */
4298static int tg3_alloc_consistent(struct tg3 *tp)
4299{
4300 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4301 (TG3_RX_RING_SIZE +
4302 TG3_RX_JUMBO_RING_SIZE)) +
4303 (sizeof(struct tx_ring_info) *
4304 TG3_TX_RING_SIZE),
4305 GFP_KERNEL);
4306 if (!tp->rx_std_buffers)
4307 return -ENOMEM;
4308
4309 memset(tp->rx_std_buffers, 0,
4310 (sizeof(struct ring_info) *
4311 (TG3_RX_RING_SIZE +
4312 TG3_RX_JUMBO_RING_SIZE)) +
4313 (sizeof(struct tx_ring_info) *
4314 TG3_TX_RING_SIZE));
4315
4316 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4317 tp->tx_buffers = (struct tx_ring_info *)
4318 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4319
4320 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4321 &tp->rx_std_mapping);
4322 if (!tp->rx_std)
4323 goto err_out;
4324
4325 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4326 &tp->rx_jumbo_mapping);
4327
4328 if (!tp->rx_jumbo)
4329 goto err_out;
4330
4331 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4332 &tp->rx_rcb_mapping);
4333 if (!tp->rx_rcb)
4334 goto err_out;
4335
4336 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4337 &tp->tx_desc_mapping);
4338 if (!tp->tx_ring)
4339 goto err_out;
4340
4341 tp->hw_status = pci_alloc_consistent(tp->pdev,
4342 TG3_HW_STATUS_SIZE,
4343 &tp->status_mapping);
4344 if (!tp->hw_status)
4345 goto err_out;
4346
4347 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4348 sizeof(struct tg3_hw_stats),
4349 &tp->stats_mapping);
4350 if (!tp->hw_stats)
4351 goto err_out;
4352
4353 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4354 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4355
4356 return 0;
4357
4358err_out:
4359 tg3_free_consistent(tp);
4360 return -ENOMEM;
4361}
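
/* Layout of the single kmalloc() made in tg3_alloc_consistent() above
 * (descriptive note):
 *
 *	tp->rx_std_buffers   TG3_RX_RING_SIZE       x struct ring_info
 *	tp->rx_jumbo_buffers TG3_RX_JUMBO_RING_SIZE x struct ring_info
 *	tp->tx_buffers       TG3_TX_RING_SIZE       x struct tx_ring_info
 *
 * The three arrays live back to back in that one allocation, which is
 * why tg3_free_consistent() only has to kfree(tp->rx_std_buffers).
 */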
4362
4363#define MAX_WAIT_CNT 1000
4364
4365/* To stop a block, clear the enable bit and poll till it
4366 * clears. tp->lock is held.
4367 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004368static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369{
4370 unsigned int i;
4371 u32 val;
4372
4373 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4374 switch (ofs) {
4375 case RCVLSC_MODE:
4376 case DMAC_MODE:
4377 case MBFREE_MODE:
4378 case BUFMGR_MODE:
4379 case MEMARB_MODE:
4380 /* We can't enable/disable these bits of the
4381 * 5705/5750, just say success.
4382 */
4383 return 0;
4384
4385 default:
4386 break;
4387 };
4388 }
4389
4390 val = tr32(ofs);
4391 val &= ~enable_bit;
4392 tw32_f(ofs, val);
4393
4394 for (i = 0; i < MAX_WAIT_CNT; i++) {
4395 udelay(100);
4396 val = tr32(ofs);
4397 if ((val & enable_bit) == 0)
4398 break;
4399 }
4400
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004401 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4403 "ofs=%lx enable_bit=%x\n",
4404 ofs, enable_bit);
4405 return -ENODEV;
4406 }
4407
4408 return 0;
4409}
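
/* Worst case wait (simple arithmetic): MAX_WAIT_CNT polls of 100us each,
 * i.e. up to roughly 100ms of delay per block before tg3_stop_block()
 * gives up and returns -ENODEV (quietly, when 'silent' is set).
 */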
4410
4411/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004412static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413{
4414 int i, err;
4415
4416 tg3_disable_ints(tp);
4417
4418 tp->rx_mode &= ~RX_MODE_ENABLE;
4419 tw32_f(MAC_RX_MODE, tp->rx_mode);
4420 udelay(10);
4421
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004422 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4423 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4424 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4425 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4426 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4427 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004429 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4430 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4431 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4432 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4433 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4434 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4435 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436
4437 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4438 tw32_f(MAC_MODE, tp->mac_mode);
4439 udelay(40);
4440
4441 tp->tx_mode &= ~TX_MODE_ENABLE;
4442 tw32_f(MAC_TX_MODE, tp->tx_mode);
4443
4444 for (i = 0; i < MAX_WAIT_CNT; i++) {
4445 udelay(100);
4446 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4447 break;
4448 }
4449 if (i >= MAX_WAIT_CNT) {
4450 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4451 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4452 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07004453 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 }
4455
Michael Chane6de8ad2005-05-05 14:42:41 -07004456 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004457 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4458 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459
4460 tw32(FTQ_RESET, 0xffffffff);
4461 tw32(FTQ_RESET, 0x00000000);
4462
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004463 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4464 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465
4466 if (tp->hw_status)
4467 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4468 if (tp->hw_stats)
4469 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4470
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 return err;
4472}
4473
4474/* tp->lock is held. */
4475static int tg3_nvram_lock(struct tg3 *tp)
4476{
4477 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4478 int i;
4479
Michael Chanec41c7d2006-01-17 02:40:55 -08004480 if (tp->nvram_lock_cnt == 0) {
4481 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4482 for (i = 0; i < 8000; i++) {
4483 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4484 break;
4485 udelay(20);
4486 }
4487 if (i == 8000) {
4488 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4489 return -ENODEV;
4490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 }
Michael Chanec41c7d2006-01-17 02:40:55 -08004492 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493 }
4494 return 0;
4495}
4496
4497/* tp->lock is held. */
4498static void tg3_nvram_unlock(struct tg3 *tp)
4499{
Michael Chanec41c7d2006-01-17 02:40:55 -08004500 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4501 if (tp->nvram_lock_cnt > 0)
4502 tp->nvram_lock_cnt--;
4503 if (tp->nvram_lock_cnt == 0)
4504 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506}
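
/* Illustrative nesting (not driver code): the counter lets lock/unlock
 * pairs nest without touching the SWARB register more than once:
 *
 *	tg3_nvram_lock(tp);	nvram_lock_cnt 0 -> 1, requests SWARB_REQ_SET1
 *	tg3_nvram_lock(tp);	nvram_lock_cnt 1 -> 2, no register access
 *	tg3_nvram_unlock(tp);	nvram_lock_cnt 2 -> 1
 *	tg3_nvram_unlock(tp);	nvram_lock_cnt 1 -> 0, writes SWARB_REQ_CLR1
 */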
4507
4508/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07004509static void tg3_enable_nvram_access(struct tg3 *tp)
4510{
4511 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4512 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4513 u32 nvaccess = tr32(NVRAM_ACCESS);
4514
4515 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4516 }
4517}
4518
4519/* tp->lock is held. */
4520static void tg3_disable_nvram_access(struct tg3 *tp)
4521{
4522 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4523 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4524 u32 nvaccess = tr32(NVRAM_ACCESS);
4525
4526 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4527 }
4528}
4529
4530/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4532{
David S. Millerf49639e2006-06-09 11:58:36 -07004533 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4534 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535
4536 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4537 switch (kind) {
4538 case RESET_KIND_INIT:
4539 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4540 DRV_STATE_START);
4541 break;
4542
4543 case RESET_KIND_SHUTDOWN:
4544 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4545 DRV_STATE_UNLOAD);
4546 break;
4547
4548 case RESET_KIND_SUSPEND:
4549 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4550 DRV_STATE_SUSPEND);
4551 break;
4552
4553 default:
4554 break;
4555 };
4556 }
4557}
4558
4559/* tp->lock is held. */
4560static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4561{
4562 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4563 switch (kind) {
4564 case RESET_KIND_INIT:
4565 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4566 DRV_STATE_START_DONE);
4567 break;
4568
4569 case RESET_KIND_SHUTDOWN:
4570 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4571 DRV_STATE_UNLOAD_DONE);
4572 break;
4573
4574 default:
4575 break;
4576 };
4577 }
4578}
4579
4580/* tp->lock is held. */
4581static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4582{
4583 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4584 switch (kind) {
4585 case RESET_KIND_INIT:
4586 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4587 DRV_STATE_START);
4588 break;
4589
4590 case RESET_KIND_SHUTDOWN:
4591 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4592 DRV_STATE_UNLOAD);
4593 break;
4594
4595 case RESET_KIND_SUSPEND:
4596 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4597 DRV_STATE_SUSPEND);
4598 break;
4599
4600 default:
4601 break;
4602 };
4603 }
4604}
4605
4606static void tg3_stop_fw(struct tg3 *);
4607
4608/* tp->lock is held. */
4609static int tg3_chip_reset(struct tg3 *tp)
4610{
4611 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07004612 void (*write_op)(struct tg3 *, u32, u32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613 int i;
4614
David S. Millerf49639e2006-06-09 11:58:36 -07004615 tg3_nvram_lock(tp);
4616
4617 /* No matching tg3_nvram_unlock() after this because
4618 * chip reset below will undo the nvram lock.
4619 */
4620 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621
Michael Chand9ab5ad2006-03-20 22:27:35 -08004622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08004623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -08004624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4625 tw32(GRC_FASTBOOT_PC, 0);
4626
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 /*
4628 * We must avoid the readl() that normally takes place.
4629 * It locks machines, causes machine checks, and other
4630 * fun things. So, temporarily disable the 5701
4631 * hardware workaround while we do the reset.
4632 */
Michael Chan1ee582d2005-08-09 20:16:46 -07004633 write_op = tp->write32;
4634 if (write_op == tg3_write_flush_reg32)
4635 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636
4637 /* do the reset */
4638 val = GRC_MISC_CFG_CORECLK_RESET;
4639
4640 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4641 if (tr32(0x7e2c) == 0x60) {
4642 tw32(0x7e2c, 0x20);
4643 }
4644 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4645 tw32(GRC_MISC_CFG, (1 << 29));
4646 val |= (1 << 29);
4647 }
4648 }
4649
4650 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4651 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4652 tw32(GRC_MISC_CFG, val);
4653
Michael Chan1ee582d2005-08-09 20:16:46 -07004654 /* restore 5701 hardware bug workaround write method */
4655 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656
4657 /* Unfortunately, we have to delay before the PCI read back.
4658 * Some 575X chips will not even respond to a PCI cfg access
4659 * when the reset command is given to the chip.
4660 *
4661 * How do these hardware designers expect things to work
4662 * properly if the PCI write is posted for a long period
4663 * of time? It is always necessary to have some method by
4664 * which a register read back can occur to push the write
4665 * out which does the reset.
4666 *
4667 * For most tg3 variants the trick below was working.
4668 * Ho hum...
4669 */
4670 udelay(120);
4671
4672 /* Flush PCI posted writes. The normal MMIO registers
4673 * are inaccessible at this time so this is the only
4674 * way to do this reliably (actually, this is no longer
4675 * the case, see above). I tried to use indirect
4676 * register read/write but this upset some 5701 variants.
4677 */
4678 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4679
4680 udelay(120);
4681
4682 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4683 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4684 int i;
4685 u32 cfg_val;
4686
4687 /* Wait for link training to complete. */
4688 for (i = 0; i < 5000; i++)
4689 udelay(100);
4690
4691 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4692 pci_write_config_dword(tp->pdev, 0xc4,
4693 cfg_val | (1 << 15));
4694 }
4695 /* Set PCIE max payload size and clear error status. */
4696 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4697 }
4698
4699 /* Re-enable indirect register accesses. */
4700 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4701 tp->misc_host_ctrl);
4702
4703 /* Set MAX PCI retry to zero. */
4704 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4705 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4706 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4707 val |= PCISTATE_RETRY_SAME_DMA;
4708 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4709
4710 pci_restore_state(tp->pdev);
4711
4712 /* Make sure PCI-X relaxed ordering bit is clear. */
4713 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4714 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4715 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4716
Michael Chana4e2b342005-10-26 15:46:52 -07004717 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chan4cf78e42005-07-25 12:29:19 -07004718 u32 val;
4719
4720 /* Chip reset on 5780 will reset MSI enable bit,
4721 * so we need to restore it.
4722 */
4723 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4724 u16 ctrl;
4725
4726 pci_read_config_word(tp->pdev,
4727 tp->msi_cap + PCI_MSI_FLAGS,
4728 &ctrl);
4729 pci_write_config_word(tp->pdev,
4730 tp->msi_cap + PCI_MSI_FLAGS,
4731 ctrl | PCI_MSI_FLAGS_ENABLE);
4732 val = tr32(MSGINT_MODE);
4733 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4734 }
4735
4736 val = tr32(MEMARB_MODE);
4737 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4738
4739 } else
4740 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741
4742 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4743 tg3_stop_fw(tp);
4744 tw32(0x5000, 0x400);
4745 }
4746
4747 tw32(GRC_MODE, tp->grc_mode);
4748
4749 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4750 u32 val = tr32(0xc4);
4751
4752 tw32(0xc4, val | (1 << 15));
4753 }
4754
4755 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4757 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4758 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4759 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4760 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4761 }
4762
4763 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4764 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4765 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07004766 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4767 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4768 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769 } else
4770 tw32_f(MAC_MODE, 0);
4771 udelay(40);
4772
David S. Millerf49639e2006-06-09 11:58:36 -07004773 /* Wait for firmware initialization to complete. */
4774 for (i = 0; i < 100000; i++) {
4775 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4776 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4777 break;
4778 udelay(10);
4779 }
4780
4781 /* Chip might not be fitted with firmware. Some Sun onboard
4782 * parts are configured like that. So don't signal the timeout
4783 * of the above loop as an error, but do report the lack of
4784 * running firmware once.
4785 */
4786 if (i >= 100000 &&
4787 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4788 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4789
4790 printk(KERN_INFO PFX "%s: No firmware running.\n",
4791 tp->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 }
4793
4794 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4795 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4796 u32 val = tr32(0x7c00);
4797
4798 tw32(0x7c00, val | (1 << 25));
4799 }
4800
4801 /* Reprobe ASF enable state. */
4802 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4803 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4804 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4805 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4806 u32 nic_cfg;
4807
4808 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4809 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4810 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07004811 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004812 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4813 }
4814 }
4815
4816 return 0;
4817}
4818
4819/* tp->lock is held. */
4820static void tg3_stop_fw(struct tg3 *tp)
4821{
4822 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4823 u32 val;
4824 int i;
4825
4826 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4827 val = tr32(GRC_RX_CPU_EVENT);
4828 val |= (1 << 14);
4829 tw32(GRC_RX_CPU_EVENT, val);
4830
4831 /* Wait for RX cpu to ACK the event. */
4832 for (i = 0; i < 100; i++) {
4833 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4834 break;
4835 udelay(1);
4836 }
4837 }
4838}
4839
4840/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07004841static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842{
4843 int err;
4844
4845 tg3_stop_fw(tp);
4846
Michael Chan944d9802005-05-29 14:57:48 -07004847 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004849 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 err = tg3_chip_reset(tp);
4851
Michael Chan944d9802005-05-29 14:57:48 -07004852 tg3_write_sig_legacy(tp, kind);
4853 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854
4855 if (err)
4856 return err;
4857
4858 return 0;
4859}
4860
4861#define TG3_FW_RELEASE_MAJOR 0x0
4862#define TG3_FW_RELASE_MINOR 0x0
4863#define TG3_FW_RELEASE_FIX 0x0
4864#define TG3_FW_START_ADDR 0x08000000
4865#define TG3_FW_TEXT_ADDR 0x08000000
4866#define TG3_FW_TEXT_LEN 0x9c0
4867#define TG3_FW_RODATA_ADDR 0x080009c0
4868#define TG3_FW_RODATA_LEN 0x60
4869#define TG3_FW_DATA_ADDR 0x08000a40
4870#define TG3_FW_DATA_LEN 0x20
4871#define TG3_FW_SBSS_ADDR 0x08000a60
4872#define TG3_FW_SBSS_LEN 0xc
4873#define TG3_FW_BSS_ADDR 0x08000a70
4874#define TG3_FW_BSS_LEN 0x10
4875
4876static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4877 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4878 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4879 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4880 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4881 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4882 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4883 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4884 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4885 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4886 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4887 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4888 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4889 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4890 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4891 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4892 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4893 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4894 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4895 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4896 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4897 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4898 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4899 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4900 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4901 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4902 0, 0, 0, 0, 0, 0,
4903 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4904 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4905 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4906 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4907 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4908 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4909 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4910 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4911 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4912 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4913 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4914 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4915 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4916 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4917 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4918 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4919 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4920 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4921 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4922 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4923 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4924 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4925 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4926 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4927 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4928 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4929 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4930 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4931 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4932 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4933 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4934 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4935 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4936 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4937 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4938 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4939 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4940 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4941 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4942 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4943 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4944 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4945 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4946 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4947 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4948 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4949 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4950 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4951 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4952 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4953 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4954 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4955 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4956 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4957 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4958 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4959 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4960 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4961 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4962 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4963 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4964 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4965 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4966 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4967 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4968};
4969
4970static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4971 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4972 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4973 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4974 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4975 0x00000000
4976};
4977
4978#if 0 /* All zeros, don't eat up space with it. */
4979u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4980 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4981 0x00000000, 0x00000000, 0x00000000, 0x00000000
4982};
4983#endif
4984
4985#define RX_CPU_SCRATCH_BASE 0x30000
4986#define RX_CPU_SCRATCH_SIZE 0x04000
4987#define TX_CPU_SCRATCH_BASE 0x34000
4988#define TX_CPU_SCRATCH_SIZE 0x04000
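
/* Each internal cell processor has a 16 KB (0x4000 byte) scratch window at
 * the base addresses above.  tg3_load_firmware_cpu() below zeroes the window
 * and copies the firmware image into it before the CPU is restarted.
 */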
4989
4990/* tp->lock is held. */
4991static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4992{
4993 int i;
4994
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02004995 BUG_ON(offset == TX_CPU_BASE &&
4996 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997
4998 if (offset == RX_CPU_BASE) {
4999 for (i = 0; i < 10000; i++) {
5000 tw32(offset + CPU_STATE, 0xffffffff);
5001 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5002 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5003 break;
5004 }
5005
5006 tw32(offset + CPU_STATE, 0xffffffff);
5007 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5008 udelay(10);
5009 } else {
5010 for (i = 0; i < 10000; i++) {
5011 tw32(offset + CPU_STATE, 0xffffffff);
5012 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5013 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5014 break;
5015 }
5016 }
5017
5018 if (i >= 10000) {
5019 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5020 "%s CPU\n",
5021 tp->dev->name,
5022 (offset == RX_CPU_BASE ? "RX" : "TX"));
5023 return -ENODEV;
5024 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005025
5026 /* Clear firmware's nvram arbitration. */
5027 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5028 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029 return 0;
5030}
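
/* Note: both branches of tg3_halt_cpu() simply rewrite CPU_STATE/CPU_MODE
 * with the halt bit (up to 10000 times) until it latches; the RX CPU also
 * gets one final forced halt write and a 10 usec settle.  The SWARB_REQ_CLR0
 * write then drops any NVRAM arbitration request left by the boot firmware
 * so that later NVRAM accesses do not stall.
 */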
5031
5032struct fw_info {
5033 unsigned int text_base;
5034 unsigned int text_len;
5035 u32 *text_data;
5036 unsigned int rodata_base;
5037 unsigned int rodata_len;
5038 u32 *rodata_data;
5039 unsigned int data_base;
5040 unsigned int data_len;
5041 u32 *data_data;
5042};
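
/* Each fw_info section carries its link-time base address, its length in
 * bytes and a pointer to the image words.  A NULL pointer (as used for the
 * all-zero tg3FwData above) makes the loader write zeros for that section.
 */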
5043
5044/* tp->lock is held. */
5045static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5046 int cpu_scratch_size, struct fw_info *info)
5047{
Michael Chanec41c7d2006-01-17 02:40:55 -08005048 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 void (*write_op)(struct tg3 *, u32, u32);
5050
5051 if (cpu_base == TX_CPU_BASE &&
5052 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5053 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5054 "TX cpu firmware on %s which is 5705.\n",
5055 tp->dev->name);
5056 return -EINVAL;
5057 }
5058
5059 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5060 write_op = tg3_write_mem;
5061 else
5062 write_op = tg3_write_indirect_reg32;
5063
Michael Chan1b628152005-05-29 14:59:49 -07005064 /* It is possible that bootcode is still loading at this point.
5065 * Get the nvram lock before halting the cpu.
5066 */
Michael Chanec41c7d2006-01-17 02:40:55 -08005067 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08005069 if (!lock_err)
5070 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 if (err)
5072 goto out;
5073
5074 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5075 write_op(tp, cpu_scratch_base + i, 0);
5076 tw32(cpu_base + CPU_STATE, 0xffffffff);
5077 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5078 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5079 write_op(tp, (cpu_scratch_base +
5080 (info->text_base & 0xffff) +
5081 (i * sizeof(u32))),
5082 (info->text_data ?
5083 info->text_data[i] : 0));
5084 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5085 write_op(tp, (cpu_scratch_base +
5086 (info->rodata_base & 0xffff) +
5087 (i * sizeof(u32))),
5088 (info->rodata_data ?
5089 info->rodata_data[i] : 0));
5090 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5091 write_op(tp, (cpu_scratch_base +
5092 (info->data_base & 0xffff) +
5093 (i * sizeof(u32))),
5094 (info->data_data ?
5095 info->data_data[i] : 0));
5096
5097 err = 0;
5098
5099out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 return err;
5101}
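
/* tg3_load_firmware_cpu() places each section at (section base & 0xffff)
 * bytes into the CPU scratch window, one 32-bit word at a time, after first
 * zeroing the whole window.  With the TSO image further below, for instance,
 * rodata_base 0x08001aa0 & 0xffff == 0x1aa0, so its words land 0x1aa0 bytes
 * into the scratch area.  A minimal sketch of that address calculation (a
 * hypothetical helper, not built into the driver):
 */
#if 0
static u32 tg3_fw_scratch_off(u32 cpu_scratch_base, u32 section_base, int word)
{
	/* Mirrors the offset arithmetic in tg3_load_firmware_cpu() above. */
	return cpu_scratch_base + (section_base & 0xffff) + word * sizeof(u32);
}
#endif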
5102
5103/* tp->lock is held. */
5104static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5105{
5106 struct fw_info info;
5107 int err, i;
5108
5109 info.text_base = TG3_FW_TEXT_ADDR;
5110 info.text_len = TG3_FW_TEXT_LEN;
5111 info.text_data = &tg3FwText[0];
5112 info.rodata_base = TG3_FW_RODATA_ADDR;
5113 info.rodata_len = TG3_FW_RODATA_LEN;
5114 info.rodata_data = &tg3FwRodata[0];
5115 info.data_base = TG3_FW_DATA_ADDR;
5116 info.data_len = TG3_FW_DATA_LEN;
5117 info.data_data = NULL;
5118
5119 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5120 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5121 &info);
5122 if (err)
5123 return err;
5124
5125 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5126 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5127 &info);
5128 if (err)
5129 return err;
5130
5131 /* Now start up only the RX cpu. */
5132 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5133 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5134
5135 for (i = 0; i < 5; i++) {
5136 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5137 break;
5138 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5139 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5140 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5141 udelay(1000);
5142 }
5143 if (i >= 5) {
5144 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5145 "to set RX CPU PC, is %08x should be %08x\n",
5146 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5147 TG3_FW_TEXT_ADDR);
5148 return -ENODEV;
5149 }
5150 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5151 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5152
5153 return 0;
5154}
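
/* The start-up handshake above is: write the desired PC, then re-halt and
 * re-write it (up to 5 tries, 1 ms apart) until CPU_PC reads back the
 * firmware entry point, and finally clear CPU_MODE so the RX cell processor
 * runs from the freshly loaded image.
 */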
5155
5156#if TG3_TSO_SUPPORT != 0
5157
5158#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5159#define TG3_TSO_FW_RELEASE_MINOR 0x6
5160#define TG3_TSO_FW_RELEASE_FIX 0x0
5161#define TG3_TSO_FW_START_ADDR 0x08000000
5162#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5163#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5164#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5165#define TG3_TSO_FW_RODATA_LEN 0x60
5166#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5167#define TG3_TSO_FW_DATA_LEN 0x30
5168#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5169#define TG3_TSO_FW_SBSS_LEN 0x2c
5170#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5171#define TG3_TSO_FW_BSS_LEN 0x894
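
/* The sections above sit nearly back to back in the image's own address map
 * (text at 0x08000000, rodata at 0x08001aa0, data at 0x08001b20, then
 * sbss/bss).  Only text, rodata and data are copied to the chip; the
 * sbss/bss figures describe the image's RAM footprint and, for the 5705
 * variant further below, are used when sizing its scratch area.
 */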
5172
5173static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5174 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5175 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5176 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5177 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5178 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5179 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5180 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5181 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5182 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5183 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5184 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5185 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5186 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5187 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5188 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5189 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5190 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5191 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5192 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5193 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5194 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5195 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5196 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5197 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5198 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5199 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5200 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5201 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5202 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5203 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5204 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5205 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5206 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5207 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5208 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5209 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5210 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5211 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5212 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5213 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5214 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5215 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5216 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5217 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5218 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5219 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5220 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5221 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5222 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5223 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5224 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5225 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5226 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5227 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5228 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5229 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5230 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5231 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5232 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5233 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5234 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5235 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5236 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5237 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5238 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5239 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5240 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5241 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5242 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5243 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5244 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5245 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5246 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5247 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5248 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5249 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5250 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5251 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5252 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5253 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5254 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5255 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5256 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5257 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5258 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5259 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5260 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5261 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5262 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5263 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5264 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5265 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5266 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5267 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5268 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5269 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5270 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5271 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5272 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5273 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5274 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5275 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5276 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5277 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5278 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5279 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5280 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5281 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5282 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5283 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5284 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5285 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5286 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5287 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5288 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5289 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5290 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5291 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5292 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5293 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5294 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5295 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5296 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5297 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5298 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5299 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5300 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5301 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5302 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5303 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5304 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5305 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5306 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5307 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5308 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5309 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5310 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5311 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5312 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5313 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5314 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5315 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5316 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5317 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5318 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5319 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5320 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5321 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5322 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5323 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5324 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5325 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5326 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5327 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5328 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5329 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5330 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5331 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5332 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5333 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5334 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5335 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5336 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5337 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5338 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5339 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5340 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5341 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5342 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5343 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5344 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5345 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5346 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5347 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5348 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5349 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5350 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5351 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5352 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5353 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5354 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5355 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5356 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5357 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5358 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5359 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5360 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5361 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5362 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5363 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5364 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5365 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5366 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5367 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5368 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5369 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5370 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5371 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5372 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5373 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5374 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5375 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5376 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5377 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5378 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5379 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5380 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5381 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5382 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5383 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5384 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5385 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5386 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5387 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5388 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5389 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5390 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5391 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5392 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5393 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5394 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5395 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5396 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5397 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5398 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5399 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5400 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5401 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5402 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5403 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5404 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5405 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5406 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5407 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5408 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5409 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5410 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5411 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5412 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5413 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5414 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5415 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5416 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5417 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5418 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5419 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5420 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5421 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5422 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5423 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5424 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5425 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5426 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5427 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5428 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5429 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5430 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5431 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5432 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5433 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5434 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5435 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5436 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5437 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5438 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5439 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5440 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5441 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5442 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5443 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5444 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5445 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5446 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5447 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5448 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5449 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5450 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5451 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5452 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5453 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5454 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5455 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5456 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5457 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5458};
5459
5460static u32 tg3TsoFwRodata[] = {
5461 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5462 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5463 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5464 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5465 0x00000000,
5466};
5467
5468static u32 tg3TsoFwData[] = {
5469 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5470 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5471 0x00000000,
5472};
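
/* Here too most of the words are ASCII: the rodata holds tags such as
 * "MainCpuB", "stkoffld" and "fatalErr", and the data block above decodes to
 * the version string "stkoffld_v1.6.0", matching the TSO firmware release
 * numbers defined earlier.
 */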
5473
5474/* 5705 needs a special version of the TSO firmware. */
5475#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5476#define TG3_TSO5_FW_RELEASE_MINOR 0x2
5477#define TG3_TSO5_FW_RELEASE_FIX 0x0
5478#define TG3_TSO5_FW_START_ADDR 0x00010000
5479#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5480#define TG3_TSO5_FW_TEXT_LEN 0xe90
5481#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5482#define TG3_TSO5_FW_RODATA_LEN 0x50
5483#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5484#define TG3_TSO5_FW_DATA_LEN 0x20
5485#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5486#define TG3_TSO5_FW_SBSS_LEN 0x28
5487#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5488#define TG3_TSO5_FW_BSS_LEN 0x88
5489
5490static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5491 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5492 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5493 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5494 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5495 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5496 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5497 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5498 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5499 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5500 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5501 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5502 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5503 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5504 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5505 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5506 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5507 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5508 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5509 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5510 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5511 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5512 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5513 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5514 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5515 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5516 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5517 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5518 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5519 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5520 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5521 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5522 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5523 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5524 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5525 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5526 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5527 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5528 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5529 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5530 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5531 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5532 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5533 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5534 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5535 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5536 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5537 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5538 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5539 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5540 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5541 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5542 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5543 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5544 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5545 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5546 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5547 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5548 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5549 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5550 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5551 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5552 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5553 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5554 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5555 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5556 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5557 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5558 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5559 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5560 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5561 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5562 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5563 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5564 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5565 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5566 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5567 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5568 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5569 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5570 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5571 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5572 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5573 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5574 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5575 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5576 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5577 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5578 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5579 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5580 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5581 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5582 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5583 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5584 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5585 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5586 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5587 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5588 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5589 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5590 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5591 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5592 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5593 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5594 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5595 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5596 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5597 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5598 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5599 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5600 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5601 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5602 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5603 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5604 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5605 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5606 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5607 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5608 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5609 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5610 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5611 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5612 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5613 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5614 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5615 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5616 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5617 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5618 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5619 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5620 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5621 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5622 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5623 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5624 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5625 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5626 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5627 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5628 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5629 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5630 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5631 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5632 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5633 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5634 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5635 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5636 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5637 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5638 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5639 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5640 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5641 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5642 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5643 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5644 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5645 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5646 0x00000000, 0x00000000, 0x00000000,
5647};
5648
5649static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5650 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5651 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5652 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5653 0x00000000, 0x00000000, 0x00000000,
5654};
5655
5656static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5657 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5658 0x00000000, 0x00000000, 0x00000000,
5659};
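
/* The data block again carries a version string, "stkoffld_v1.2.0", which is
 * consistent with the TG3_TSO5_FW release numbers above.
 */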
5660
5661/* tp->lock is held. */
5662static int tg3_load_tso_firmware(struct tg3 *tp)
5663{
5664 struct fw_info info;
5665 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5666 int err, i;
5667
5668 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5669 return 0;
5670
5671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5672 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5673 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5674 info.text_data = &tg3Tso5FwText[0];
5675 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5676 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5677 info.rodata_data = &tg3Tso5FwRodata[0];
5678 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5679 info.data_len = TG3_TSO5_FW_DATA_LEN;
5680 info.data_data = &tg3Tso5FwData[0];
5681 cpu_base = RX_CPU_BASE;
5682 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5683 cpu_scratch_size = (info.text_len +
5684 info.rodata_len +
5685 info.data_len +
5686 TG3_TSO5_FW_SBSS_LEN +
5687 TG3_TSO5_FW_BSS_LEN);
5688 } else {
5689 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5690 info.text_len = TG3_TSO_FW_TEXT_LEN;
5691 info.text_data = &tg3TsoFwText[0];
5692 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5693 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5694 info.rodata_data = &tg3TsoFwRodata[0];
5695 info.data_base = TG3_TSO_FW_DATA_ADDR;
5696 info.data_len = TG3_TSO_FW_DATA_LEN;
5697 info.data_data = &tg3TsoFwData[0];
5698 cpu_base = TX_CPU_BASE;
5699 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5700 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5701 }
5702
5703 err = tg3_load_firmware_cpu(tp, cpu_base,
5704 cpu_scratch_base, cpu_scratch_size,
5705 &info);
5706 if (err)
5707 return err;
5708
5709 /* Now start up the cpu. */
5710 tw32(cpu_base + CPU_STATE, 0xffffffff);
5711 tw32_f(cpu_base + CPU_PC, info.text_base);
5712
5713 for (i = 0; i < 5; i++) {
5714 if (tr32(cpu_base + CPU_PC) == info.text_base)
5715 break;
5716 tw32(cpu_base + CPU_STATE, 0xffffffff);
5717 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5718 tw32_f(cpu_base + CPU_PC, info.text_base);
5719 udelay(1000);
5720 }
5721 if (i >= 5) {
5722 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5723 "to set CPU PC, is %08x should be %08x\n",
5724 tp->dev->name, tr32(cpu_base + CPU_PC),
5725 info.text_base);
5726 return -ENODEV;
5727 }
5728 tw32(cpu_base + CPU_STATE, 0xffffffff);
5729 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5730 return 0;
5731}
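
/* On 5705-class chips the TSO image runs on the RX cell processor and is
 * staged in the mbuf pool SRAM (NIC_SRAM_MBUF_POOL_BASE5705); tg3_reset_hw()
 * carves the firmware footprint out of the mbuf pool accordingly.  All other
 * TSO-capable chips load the image into the TX CPU's dedicated scratch area.
 */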
5732
5733#endif /* TG3_TSO_SUPPORT != 0 */
5734
5735/* tp->lock is held. */
5736static void __tg3_set_mac_addr(struct tg3 *tp)
5737{
5738 u32 addr_high, addr_low;
5739 int i;
5740
5741 addr_high = ((tp->dev->dev_addr[0] << 8) |
5742 tp->dev->dev_addr[1]);
5743 addr_low = ((tp->dev->dev_addr[2] << 24) |
5744 (tp->dev->dev_addr[3] << 16) |
5745 (tp->dev->dev_addr[4] << 8) |
5746 (tp->dev->dev_addr[5] << 0));
5747 for (i = 0; i < 4; i++) {
5748 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5749 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5750 }
5751
5752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5754 for (i = 0; i < 12; i++) {
5755 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5756 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5757 }
5758 }
5759
5760 addr_high = (tp->dev->dev_addr[0] +
5761 tp->dev->dev_addr[1] +
5762 tp->dev->dev_addr[2] +
5763 tp->dev->dev_addr[3] +
5764 tp->dev->dev_addr[4] +
5765 tp->dev->dev_addr[5]) &
5766 TX_BACKOFF_SEED_MASK;
5767 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5768}
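
/* Worked example (hypothetical address): for dev_addr 00:10:18:aa:bb:cc the
 * code above computes addr_high = 0x00000010 and addr_low = 0x18aabbcc,
 * writes the pair into all four MAC_ADDR_<n> slots (plus the twelve extended
 * slots on 5703/5704), and seeds the TX backoff register with the byte sum
 * masked by TX_BACKOFF_SEED_MASK.
 */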
5769
5770static int tg3_set_mac_addr(struct net_device *dev, void *p)
5771{
5772 struct tg3 *tp = netdev_priv(dev);
5773 struct sockaddr *addr = p;
5774
Michael Chanf9804dd2005-09-27 12:13:10 -07005775 if (!is_valid_ether_addr(addr->sa_data))
5776 return -EINVAL;
5777
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5779
Michael Chane75f7c92006-03-20 21:33:26 -08005780 if (!netif_running(dev))
5781 return 0;
5782
Michael Chan58712ef2006-04-29 18:58:01 -07005783 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5784 /* Reset chip so that ASF can re-init any MAC addresses it
5785 * needs.
5786 */
5787 tg3_netif_stop(tp);
5788 tg3_full_lock(tp, 1);
5789
5790 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005791 tg3_init_hw(tp, 0);
Michael Chan58712ef2006-04-29 18:58:01 -07005792
5793 tg3_netif_start(tp);
5794 tg3_full_unlock(tp);
5795 } else {
5796 spin_lock_bh(&tp->lock);
5797 __tg3_set_mac_addr(tp);
5798 spin_unlock_bh(&tp->lock);
5799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800
5801 return 0;
5802}
5803
5804/* tp->lock is held. */
5805static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5806 dma_addr_t mapping, u32 maxlen_flags,
5807 u32 nic_addr)
5808{
5809 tg3_write_mem(tp,
5810 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5811 ((u64) mapping >> 32));
5812 tg3_write_mem(tp,
5813 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5814 ((u64) mapping & 0xffffffff));
5815 tg3_write_mem(tp,
5816 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5817 maxlen_flags);
5818
5819 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5820 tg3_write_mem(tp,
5821 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5822 nic_addr);
5823}
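
/* A TG3_BDINFO block is a small ring descriptor in NIC SRAM: the 64-bit host
 * DMA address of the ring (high/low words), a maxlen/flags word, and the
 * ring's NIC-side address.  The NIC-side address word is skipped on 5705 and
 * later parts.
 */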
5824
5825static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07005826static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07005827{
5828 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5829 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5830 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5831 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5832 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5833 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5834 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5835 }
5836 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5837 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5838 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5839 u32 val = ec->stats_block_coalesce_usecs;
5840
5841 if (!netif_carrier_ok(tp->dev))
5842 val = 0;
5843
5844 tw32(HOSTCC_STAT_COAL_TICKS, val);
5845 }
5846}
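
/* These registers map directly onto the ethtool coalescing parameters; for
 * example "ethtool -C ethX rx-usecs 20 rx-frames 5" (device name and values
 * purely illustrative) ends up as HOSTCC_RXCOL_TICKS = 20 and
 * HOSTCC_RXMAX_FRAMES = 5.  The stats block interval is forced to zero
 * while the link is down.
 */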
Linus Torvalds1da177e2005-04-16 15:20:36 -07005847
5848/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005849static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850{
5851 u32 val, rdmac_mode;
5852 int i, err, limit;
5853
5854 tg3_disable_ints(tp);
5855
5856 tg3_stop_fw(tp);
5857
5858 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5859
5860 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07005861 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005862 }
5863
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005864 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08005865 tg3_phy_reset(tp);
5866
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867 err = tg3_chip_reset(tp);
5868 if (err)
5869 return err;
5870
5871 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5872
5873 /* This works around an issue with Athlon chipsets on
5874 * B3 tigon3 silicon. This bit has no effect on any
5875 * other revision. But do not set this on PCI Express
5876 * chips.
5877 */
5878 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5879 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5880 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5881
5882 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5883 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5884 val = tr32(TG3PCI_PCISTATE);
5885 val |= PCISTATE_RETRY_SAME_DMA;
5886 tw32(TG3PCI_PCISTATE, val);
5887 }
5888
5889 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5890 /* Enable some hw fixes. */
5891 val = tr32(TG3PCI_MSI_DATA);
5892 val |= (1 << 26) | (1 << 28) | (1 << 29);
5893 tw32(TG3PCI_MSI_DATA, val);
5894 }
5895
5896 /* Descriptor ring init may access the NIC SRAM
5897 * area to set up the TX descriptors, so we
5898 * can only do this after the hardware has been
5899 * successfully reset.
5900 */
5901 tg3_init_rings(tp);
5902
5903 /* This value is determined during the probe time DMA
5904 * engine test, tg3_test_dma.
5905 */
5906 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5907
5908 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5909 GRC_MODE_4X_NIC_SEND_RINGS |
5910 GRC_MODE_NO_TX_PHDR_CSUM |
5911 GRC_MODE_NO_RX_PHDR_CSUM);
5912 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07005913
5914 /* Pseudo-header checksum is done by hardware logic and not
5915 * the offload processors, so make the chip do the pseudo-
5916 * header checksums on receive. For transmit it is more
5917 * convenient to do the pseudo-header checksum in software
5918 * as Linux does that on transmit for us in all cases.
5919 */
5920 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005921
5922 tw32(GRC_MODE,
5923 tp->grc_mode |
5924 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5925
5926 /* Setup the timer prescaler register. Clock is always 66MHz. */
5927 val = tr32(GRC_MISC_CFG);
5928 val &= ~0xff;
5929 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5930 tw32(GRC_MISC_CFG, val);
5931
5932 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07005933 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005934 /* Do nothing. */
5935 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5936 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5938 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5939 else
5940 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5941 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5942 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5943 }
5944#if TG3_TSO_SUPPORT != 0
5945 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5946 int fw_len;
5947
5948 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5949 TG3_TSO5_FW_RODATA_LEN +
5950 TG3_TSO5_FW_DATA_LEN +
5951 TG3_TSO5_FW_SBSS_LEN +
5952 TG3_TSO5_FW_BSS_LEN);
5953 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5954 tw32(BUFMGR_MB_POOL_ADDR,
5955 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5956 tw32(BUFMGR_MB_POOL_SIZE,
5957 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5958 }
5959#endif
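	/* For the 5705 TSO case above, the image footprint is 0xe90 + 0x50 +
	 * 0x20 + 0x28 + 0x88 = 0xfb0 bytes; rounded up to a 0x80 boundary
	 * this gives fw_len = 0x1000, so the mbuf pool starts 4 KB above its
	 * normal base and is shrunk by fw_len + 0xa00.
	 */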
5960
Michael Chan0f893dc2005-07-25 12:30:38 -07005961 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5963 tp->bufmgr_config.mbuf_read_dma_low_water);
5964 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5965 tp->bufmgr_config.mbuf_mac_rx_low_water);
5966 tw32(BUFMGR_MB_HIGH_WATER,
5967 tp->bufmgr_config.mbuf_high_water);
5968 } else {
5969 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5970 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5971 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5972 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5973 tw32(BUFMGR_MB_HIGH_WATER,
5974 tp->bufmgr_config.mbuf_high_water_jumbo);
5975 }
5976 tw32(BUFMGR_DMA_LOW_WATER,
5977 tp->bufmgr_config.dma_low_water);
5978 tw32(BUFMGR_DMA_HIGH_WATER,
5979 tp->bufmgr_config.dma_high_water);
5980
5981 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5982 for (i = 0; i < 2000; i++) {
5983 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5984 break;
5985 udelay(10);
5986 }
5987 if (i >= 2000) {
5988 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5989 tp->dev->name);
5990 return -ENODEV;
5991 }
5992
5993 /* Setup replenish threshold. */
5994 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5995
5996 /* Initialize TG3_BDINFO's at:
5997 * RCVDBDI_STD_BD: standard eth size rx ring
5998 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5999 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6000 *
6001 * like so:
6002 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6003 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6004 * ring attribute flags
6005 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6006 *
6007 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6008 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6009 *
6010 * The size of each ring is fixed in the firmware, but the location is
6011 * configurable.
6012 */
6013 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6014 ((u64) tp->rx_std_mapping >> 32));
6015 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6016 ((u64) tp->rx_std_mapping & 0xffffffff));
6017 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6018 NIC_SRAM_RX_BUFFER_DESC);
6019
6020 /* Don't even try to program the JUMBO/MINI buffer descriptor
6021 * configs on 5705.
6022 */
6023 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6024 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6025 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6026 } else {
6027 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6028 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6029
6030 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6031 BDINFO_FLAGS_DISABLED);
6032
6033 /* Setup replenish threshold. */
6034 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6035
Michael Chan0f893dc2005-07-25 12:30:38 -07006036 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006037 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6038 ((u64) tp->rx_jumbo_mapping >> 32));
6039 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6040 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6041 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6042 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6043 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6044 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6045 } else {
6046 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6047 BDINFO_FLAGS_DISABLED);
6048 }
6049
6050 }
6051
6052 /* There is only one send ring on 5705/5750, no need to explicitly
6053 * disable the others.
6054 */
6055 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6056 /* Clear out send RCB ring in SRAM. */
6057 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6058 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6059 BDINFO_FLAGS_DISABLED);
6060 }
6061
6062 tp->tx_prod = 0;
6063 tp->tx_cons = 0;
6064 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6065 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6066
6067 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6068 tp->tx_desc_mapping,
6069 (TG3_TX_RING_SIZE <<
6070 BDINFO_FLAGS_MAXLEN_SHIFT),
6071 NIC_SRAM_TX_BUFFER_DESC);
6072
6073 /* There is only one receive return ring on 5705/5750, no need
6074 * to explicitly disable the others.
6075 */
6076 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6077 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6078 i += TG3_BDINFO_SIZE) {
6079 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6080 BDINFO_FLAGS_DISABLED);
6081 }
6082 }
6083
6084 tp->rx_rcb_ptr = 0;
6085 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6086
6087 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6088 tp->rx_rcb_mapping,
6089 (TG3_RX_RCB_RING_SIZE(tp) <<
6090 BDINFO_FLAGS_MAXLEN_SHIFT),
6091 0);
6092
6093 tp->rx_std_ptr = tp->rx_pending;
6094 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6095 tp->rx_std_ptr);
6096
6097	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6098			   tp->rx_jumbo_pending : 0;
6099 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6100 tp->rx_jumbo_ptr);
6101
6102 /* Initialize MAC address and backoff seed. */
6103 __tg3_set_mac_addr(tp);
6104
6105 /* MTU + ethernet header + FCS + optional VLAN tag */
6106 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6107
6108 /* The slot time is changed by tg3_setup_phy if we
6109 * run at gigabit with half duplex.
6110 */
6111 tw32(MAC_TX_LENGTHS,
6112 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6113 (6 << TX_LENGTHS_IPG_SHIFT) |
6114 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6115
6116 /* Receive rules. */
6117 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6118 tw32(RCVLPC_CONFIG, 0x0181);
6119
6120 /* Calculate RDMAC_MODE setting early, we need it to determine
6121 * the RCVLPC_STATE_ENABLE mask.
6122 */
6123 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6124 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6125 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6126 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6127 RDMAC_MODE_LNGREAD_ENAB);
6128 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6129 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
Michael Chan85e94ce2005-04-21 17:05:28 -07006130
6131 /* If statement applies to 5705 and 5750 PCI devices only */
6132 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6133 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6134 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6136 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6137 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6138 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6139 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6140 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6141 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6142 }
6143 }
6144
Michael Chan85e94ce2005-04-21 17:05:28 -07006145 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6146 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6147
Linus Torvalds1da177e2005-04-16 15:20:36 -07006148#if TG3_TSO_SUPPORT != 0
6149 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6150 rdmac_mode |= (1 << 27);
6151#endif
6152
6153 /* Receive/send statistics. */
6154 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6155 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6156 val = tr32(RCVLPC_STATS_ENABLE);
6157 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6158 tw32(RCVLPC_STATS_ENABLE, val);
6159 } else {
6160 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6161 }
6162 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6163 tw32(SNDDATAI_STATSENAB, 0xffffff);
6164 tw32(SNDDATAI_STATSCTRL,
6165 (SNDDATAI_SCTRL_ENABLE |
6166 SNDDATAI_SCTRL_FASTUPD));
6167
6168 /* Setup host coalescing engine. */
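	/* Writing 0 clears HOSTCC_MODE_ENABLE; poll up to 20 ms
	 * (2000 x 10 usec) for the engine to go idle before it is
	 * reprogrammed below.
	 */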
6169 tw32(HOSTCC_MODE, 0);
6170 for (i = 0; i < 2000; i++) {
6171 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6172 break;
6173 udelay(10);
6174 }
6175
Michael Chand244c892005-07-05 14:42:33 -07006176 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006177
6178 /* set status block DMA address */
6179 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6180 ((u64) tp->status_mapping >> 32));
6181 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6182 ((u64) tp->status_mapping & 0xffffffff));
6183
6184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6185 /* Status/statistics block address. See tg3_timer,
6186 * the tg3_periodic_fetch_stats call there, and
6187 * tg3_get_stats to see how this works for 5705/5750 chips.
6188 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6190 ((u64) tp->stats_mapping >> 32));
6191 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6192 ((u64) tp->stats_mapping & 0xffffffff));
6193 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6194 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6195 }
6196
6197 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6198
6199 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6200 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6201 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6202 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6203
6204 /* Clear statistics/status block in chip, and status block in ram. */
6205 for (i = NIC_SRAM_STATS_BLK;
6206 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6207 i += sizeof(u32)) {
6208 tg3_write_mem(tp, i, 0);
6209 udelay(40);
6210 }
6211 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6212
Michael Chanc94e3942005-09-27 12:12:42 -07006213 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6214 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6215 /* reset to prevent losing 1st rx packet intermittently */
6216 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6217 udelay(10);
6218 }
6219
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6221 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6222 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6223 udelay(40);
6224
Michael Chan314fba32005-04-21 17:07:04 -07006225 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6226 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6227 * register to preserve the GPIO settings for LOMs. The GPIOs,
6228 * whether used as inputs or outputs, are set by boot code after
6229 * reset.
6230 */
6231 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6232 u32 gpio_mask;
6233
6234 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6235 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07006236
6237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6238 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6239 GRC_LCLCTRL_GPIO_OUTPUT3;
6240
Michael Chanaf36e6b2006-03-23 01:28:06 -08006241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6242 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6243
Michael Chan314fba32005-04-21 17:07:04 -07006244 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6245
6246 /* GPIO1 must be driven high for eeprom write protect */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006247 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6248 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07006249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006250 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6251 udelay(100);
6252
6253	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6254	tp->last_tag = 0;
6255
6256 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6257 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6258 udelay(40);
6259 }
6260
6261 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6262 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6263 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6264 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6265 WDMAC_MODE_LNGREAD_ENAB);
6266
6267	/* If statement applies to 5705 and 5750 PCI devices only */
6268	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6269	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6270	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6271		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6272 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6273 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6274 /* nothing */
6275 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6276 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6277 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6278 val |= WDMAC_MODE_RX_ACCEL;
6279 }
6280 }
6281
6282	/* Enable host coalescing bug fix */
6283	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6284	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6285		val |= (1 << 29);
6286
6287	tw32_f(WDMAC_MODE, val);
6288 udelay(40);
6289
6290 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6291 val = tr32(TG3PCI_X_CAPS);
6292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6293 val &= ~PCIX_CAPS_BURST_MASK;
6294 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6295 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6296 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6297 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6298 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6299 val |= (tp->split_mode_max_reqs <<
6300 PCIX_CAPS_SPLIT_SHIFT);
6301 }
6302 tw32(TG3PCI_X_CAPS, val);
6303 }
6304
6305 tw32_f(RDMAC_MODE, rdmac_mode);
6306 udelay(40);
6307
6308 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6309 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6310 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6311 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6312 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6313 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6314 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6315 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6316#if TG3_TSO_SUPPORT != 0
6317 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6318 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6319#endif
6320 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6321 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6322
6323 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6324 err = tg3_load_5701_a0_firmware_fix(tp);
6325 if (err)
6326 return err;
6327 }
6328
6329#if TG3_TSO_SUPPORT != 0
6330 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6331 err = tg3_load_tso_firmware(tp);
6332 if (err)
6333 return err;
6334 }
6335#endif
6336
6337 tp->tx_mode = TX_MODE_ENABLE;
6338 tw32_f(MAC_TX_MODE, tp->tx_mode);
6339 udelay(100);
6340
6341 tp->rx_mode = RX_MODE_ENABLE;
Michael Chanaf36e6b2006-03-23 01:28:06 -08006342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6343 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6344
Linus Torvalds1da177e2005-04-16 15:20:36 -07006345 tw32_f(MAC_RX_MODE, tp->rx_mode);
6346 udelay(10);
6347
6348 if (tp->link_config.phy_is_low_power) {
6349 tp->link_config.phy_is_low_power = 0;
6350 tp->link_config.speed = tp->link_config.orig_speed;
6351 tp->link_config.duplex = tp->link_config.orig_duplex;
6352 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6353 }
6354
6355 tp->mi_mode = MAC_MI_MODE_BASE;
6356 tw32_f(MAC_MI_MODE, tp->mi_mode);
6357 udelay(80);
6358
6359 tw32(MAC_LED_CTRL, tp->led_ctrl);
6360
6361 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6362	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6363		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6364 udelay(10);
6365 }
6366 tw32_f(MAC_RX_MODE, tp->rx_mode);
6367 udelay(10);
6368
6369 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6370 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6371 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6372 /* Set drive transmission level to 1.2V */
6373 /* only if the signal pre-emphasis bit is not set */
6374 val = tr32(MAC_SERDES_CFG);
6375 val &= 0xfffff000;
6376 val |= 0x880;
6377 tw32(MAC_SERDES_CFG, val);
6378 }
6379 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6380 tw32(MAC_SERDES_CFG, 0x616000);
6381 }
6382
6383 /* Prevent chip from dropping frames when flow control
6384 * is enabled.
6385 */
6386 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6387
6388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6389 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6390 /* Use hardware link auto-negotiation */
6391 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6392 }
6393
Michael Chand4d2c552006-03-20 17:47:20 -08006394 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6395 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6396 u32 tmp;
6397
6398 tmp = tr32(SERDES_RX_CTRL);
6399 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6400 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6401 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6402 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6403 }
6404
6405	err = tg3_setup_phy(tp, reset_phy);
6406	if (err)
6407 return err;
6408
6409 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6410 u32 tmp;
6411
6412 /* Clear CRC stats. */
6413 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6414 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6415 tg3_readphy(tp, 0x14, &tmp);
6416 }
6417 }
6418
6419 __tg3_set_rx_mode(tp->dev);
6420
6421 /* Initialize receive rules. */
6422 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6423 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6424 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6425 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6426
6427	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6428	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6429		limit = 8;
6430 else
6431 limit = 16;
6432 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6433 limit -= 4;
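	/* Deliberate fall-through: each case clears one unused rule/value
	 * pair, so everything from rule (limit - 1) down to rule 4 is
	 * disabled.  Rules 0 and 1 are programmed above, rules 2 and 3 are
	 * left untouched, and when ASF is enabled the top four rules are
	 * reserved for the firmware (hence limit -= 4).
	 */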
6434 switch (limit) {
6435 case 16:
6436 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6437 case 15:
6438 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6439 case 14:
6440 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6441 case 13:
6442 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6443 case 12:
6444 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6445 case 11:
6446 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6447 case 10:
6448 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6449 case 9:
6450 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6451 case 8:
6452 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6453 case 7:
6454 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6455 case 6:
6456 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6457 case 5:
6458 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6459 case 4:
6460 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6461 case 3:
6462 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6463 case 2:
6464 case 1:
6465
6466 default:
6467 break;
6468	}
6469
6470 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6471
Linus Torvalds1da177e2005-04-16 15:20:36 -07006472 return 0;
6473}
6474
6475/* Called at device open time to get the chip ready for
6476 * packet processing. Invoked with tp->lock held.
6477 */
6478static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6479{
6480	int err;
6481
6482	/* Force the chip into D0. */
6483	err = tg3_set_power_state(tp, PCI_D0);
6484	if (err)
6485 goto out;
6486
6487 tg3_switch_clocks(tp);
6488
6489 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6490
6491	err = tg3_reset_hw(tp, reset_phy);
6492
6493out:
6494 return err;
6495}
6496
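/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
 * counter, bumping the high word whenever the low word wraps.
 */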
6497#define TG3_STAT_ADD32(PSTAT, REG) \
6498do { u32 __val = tr32(REG); \
6499 (PSTAT)->low += __val; \
6500 if ((PSTAT)->low < __val) \
6501 (PSTAT)->high += 1; \
6502} while (0)
6503
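/* 5705 and newer chips do not DMA a statistics block to host memory, so
 * the once-per-second timer path calls this to fold the MAC and receive
 * list placement counters into tp->hw_stats.
 */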
6504static void tg3_periodic_fetch_stats(struct tg3 *tp)
6505{
6506 struct tg3_hw_stats *sp = tp->hw_stats;
6507
6508 if (!netif_carrier_ok(tp->dev))
6509 return;
6510
6511 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6512 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6513 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6514 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6515 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6516 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6517 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6518 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6519 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6520 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6521 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6522 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6523 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6524
6525 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6526 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6527 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6528 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6529 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6530 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6531 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6532 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6533 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6534 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6535 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6536 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6537 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6538 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07006539
6540 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6541 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6542 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543}
6544
6545static void tg3_timer(unsigned long __opaque)
6546{
6547 struct tg3 *tp = (struct tg3 *) __opaque;
6548
6549	if (tp->irq_sync)
6550		goto restart_timer;
6551
6552	spin_lock(&tp->lock);
6553
6554	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6555		/* All of this garbage is because, when using non-tagged
6556		 * IRQ status, the mailbox/status_block protocol the chip
6557		 * uses with the cpu is race prone.
6558		 */
6559 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6560 tw32(GRC_LOCAL_CTRL,
6561 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6562 } else {
6563 tw32(HOSTCC_MODE, tp->coalesce_mode |
6564 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6565 }
6566
6567		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6568			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6569			spin_unlock(&tp->lock);
6570			schedule_work(&tp->reset_task);
6571			return;
6572		}
6573	}
6574
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575 /* This part only runs once per second. */
6576 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07006577 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6578 tg3_periodic_fetch_stats(tp);
6579
Linus Torvalds1da177e2005-04-16 15:20:36 -07006580 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6581 u32 mac_stat;
6582 int phy_event;
6583
6584 mac_stat = tr32(MAC_STATUS);
6585
6586 phy_event = 0;
6587 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6588 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6589 phy_event = 1;
6590 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6591 phy_event = 1;
6592
6593 if (phy_event)
6594 tg3_setup_phy(tp, 0);
6595 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6596 u32 mac_stat = tr32(MAC_STATUS);
6597 int need_setup = 0;
6598
6599 if (netif_carrier_ok(tp->dev) &&
6600 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6601 need_setup = 1;
6602 }
6603 if (! netif_carrier_ok(tp->dev) &&
6604 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6605 MAC_STATUS_SIGNAL_DET))) {
6606 need_setup = 1;
6607 }
6608 if (need_setup) {
6609 tw32_f(MAC_MODE,
6610 (tp->mac_mode &
6611 ~MAC_MODE_PORT_MODE_MASK));
6612 udelay(40);
6613 tw32_f(MAC_MODE, tp->mac_mode);
6614 udelay(40);
6615 tg3_setup_phy(tp, 0);
6616 }
Michael Chan747e8f82005-07-25 12:33:22 -07006617 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6618 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619
6620 tp->timer_counter = tp->timer_multiplier;
6621 }
6622
6623	/* Heartbeat is only sent once every 2 seconds. */
6624	if (!--tp->asf_counter) {
6625		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6626			u32 val;
6627
6628			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6629				      FWCMD_NICDRV_ALIVE2);
6630			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6631			/* 5 second timeout */
6632			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6633			val = tr32(GRC_RX_CPU_EVENT);
6634 val |= (1 << 14);
6635 tw32(GRC_RX_CPU_EVENT, val);
6636 }
6637 tp->asf_counter = tp->asf_multiplier;
6638 }
6639
6640	spin_unlock(&tp->lock);
6641
6642restart_timer:
6643	tp->timer.expires = jiffies + tp->timer_offset;
6644	add_timer(&tp->timer);
6645}
6646
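/* Pick the ISR flavor and request_irq() flags that match the current
 * MSI and tagged-status configuration.
 */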
6647static int tg3_request_irq(struct tg3 *tp)
6648{
6649 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6650 unsigned long flags;
6651 struct net_device *dev = tp->dev;
6652
6653 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6654 fn = tg3_msi;
6655 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6656 fn = tg3_msi_1shot;
6657 flags = SA_SAMPLE_RANDOM;
6658 } else {
6659 fn = tg3_interrupt;
6660 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6661 fn = tg3_interrupt_tagged;
6662 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6663 }
6664 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6665}
6666
Michael Chan79381092005-04-21 17:13:59 -07006667static int tg3_test_interrupt(struct tg3 *tp)
6668{
6669 struct net_device *dev = tp->dev;
6670 int err, i;
6671 u32 int_mbox = 0;
6672
Michael Chand4bc3922005-05-29 14:59:20 -07006673 if (!netif_running(dev))
6674 return -ENODEV;
6675
Michael Chan79381092005-04-21 17:13:59 -07006676 tg3_disable_ints(tp);
6677
6678 free_irq(tp->pdev->irq, dev);
6679
6680 err = request_irq(tp->pdev->irq, tg3_test_isr,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006681 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07006682 if (err)
6683 return err;
6684
6685	tp->hw_status->status &= ~SD_STATUS_UPDATED;
6686	tg3_enable_ints(tp);
6687
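	/* Force a coalescing "now" event so the chip raises an interrupt
	 * that the mailbox poll below can observe.
	 */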
6688 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6689 HOSTCC_MODE_NOW);
6690
6691 for (i = 0; i < 5; i++) {
Michael Chan09ee9292005-08-09 20:17:00 -07006692 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6693 TG3_64BIT_REG_LOW);
Michael Chan79381092005-04-21 17:13:59 -07006694 if (int_mbox != 0)
6695 break;
6696 msleep(10);
6697 }
6698
6699 tg3_disable_ints(tp);
6700
6701 free_irq(tp->pdev->irq, dev);
6702
Michael Chanfcfa0a32006-03-20 22:28:41 -08006703 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07006704
6705 if (err)
6706 return err;
6707
6708 if (int_mbox != 0)
6709 return 0;
6710
6711 return -EIO;
6712}
6713
6714/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6715 * successfully restored
6716 */
6717static int tg3_test_msi(struct tg3 *tp)
6718{
6719 struct net_device *dev = tp->dev;
6720 int err;
6721 u16 pci_cmd;
6722
6723 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6724 return 0;
6725
6726 /* Turn off SERR reporting in case MSI terminates with Master
6727 * Abort.
6728 */
6729 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6730 pci_write_config_word(tp->pdev, PCI_COMMAND,
6731 pci_cmd & ~PCI_COMMAND_SERR);
6732
6733 err = tg3_test_interrupt(tp);
6734
6735 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6736
6737 if (!err)
6738 return 0;
6739
6740 /* other failures */
6741 if (err != -EIO)
6742 return err;
6743
6744 /* MSI test failed, go back to INTx mode */
6745 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6746 "switching to INTx mode. Please report this failure to "
6747 "the PCI maintainer and include system chipset information.\n",
6748 tp->dev->name);
6749
6750 free_irq(tp->pdev->irq, dev);
6751 pci_disable_msi(tp->pdev);
6752
6753 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6754
Michael Chanfcfa0a32006-03-20 22:28:41 -08006755 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07006756 if (err)
6757 return err;
6758
6759 /* Need to reset the chip because the MSI cycle may have terminated
6760 * with Master Abort.
6761 */
6762	tg3_full_lock(tp, 1);
6763
6764	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6765	err = tg3_init_hw(tp, 1);
6766
6767	tg3_full_unlock(tp);
6768
6769 if (err)
6770 free_irq(tp->pdev->irq, dev);
6771
6772 return err;
6773}
6774
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775static int tg3_open(struct net_device *dev)
6776{
6777 struct tg3 *tp = netdev_priv(dev);
6778 int err;
6779
6780	tg3_full_lock(tp, 0);
6781
6782	err = tg3_set_power_state(tp, PCI_D0);
6783	if (err)
6784		return err;
6785
6786	tg3_disable_ints(tp);
6787 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6788
David S. Millerf47c11e2005-06-24 20:18:35 -07006789 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006790
6791 /* The placement of this call is tied
6792 * to the setup and use of Host TX descriptors.
6793 */
6794 err = tg3_alloc_consistent(tp);
6795 if (err)
6796 return err;
6797
Michael Chan88b06bc2005-04-21 17:13:25 -07006798 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6799 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
Michael Chand4d2c552006-03-20 17:47:20 -08006800 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6801 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6802 (tp->pdev_peer == tp->pdev))) {
David S. Millerfac9b832005-05-18 22:46:34 -07006803 /* All MSI supporting chips should support tagged
6804 * status. Assert that this is the case.
6805 */
6806 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6807 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6808 "Not using MSI.\n", tp->dev->name);
6809 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006810 u32 msi_mode;
6811
6812 msi_mode = tr32(MSGINT_MODE);
6813 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6814 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6815 }
6816 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08006817 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818
6819 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006820 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6821 pci_disable_msi(tp->pdev);
6822 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6823 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824 tg3_free_consistent(tp);
6825 return err;
6826 }
6827
6828	tg3_full_lock(tp, 0);
6829
6830	err = tg3_init_hw(tp, 1);
6831	if (err) {
6832		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6833		tg3_free_rings(tp);
6834	} else {
6835		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6836 tp->timer_offset = HZ;
6837 else
6838 tp->timer_offset = HZ / 10;
6839
6840 BUG_ON(tp->timer_offset > HZ);
6841 tp->timer_counter = tp->timer_multiplier =
6842 (HZ / tp->timer_offset);
6843 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07006844 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006845
6846 init_timer(&tp->timer);
6847 tp->timer.expires = jiffies + tp->timer_offset;
6848 tp->timer.data = (unsigned long) tp;
6849 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006850 }
6851
David S. Millerf47c11e2005-06-24 20:18:35 -07006852 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006853
6854 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006855 free_irq(tp->pdev->irq, dev);
6856 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6857 pci_disable_msi(tp->pdev);
6858 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006860 tg3_free_consistent(tp);
6861 return err;
6862 }
6863
Michael Chan79381092005-04-21 17:13:59 -07006864 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6865 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07006866
Michael Chan79381092005-04-21 17:13:59 -07006867 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07006868 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07006869
6870 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6871 pci_disable_msi(tp->pdev);
6872 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6873 }
Michael Chan944d9802005-05-29 14:57:48 -07006874 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006875 tg3_free_rings(tp);
6876 tg3_free_consistent(tp);
6877
David S. Millerf47c11e2005-06-24 20:18:35 -07006878 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006879
6880 return err;
6881 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08006882
6883 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6884 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6885 u32 val = tr32(0x7c04);
6886
6887 tw32(0x7c04, val | (1 << 29));
6888 }
6889 }
Michael Chan79381092005-04-21 17:13:59 -07006890 }
6891
David S. Millerf47c11e2005-06-24 20:18:35 -07006892 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006893
Michael Chan79381092005-04-21 17:13:59 -07006894 add_timer(&tp->timer);
6895 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896 tg3_enable_ints(tp);
6897
David S. Millerf47c11e2005-06-24 20:18:35 -07006898 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899
6900 netif_start_queue(dev);
6901
6902 return 0;
6903}
6904
6905#if 0
6906/*static*/ void tg3_dump_state(struct tg3 *tp)
6907{
6908 u32 val32, val32_2, val32_3, val32_4, val32_5;
6909 u16 val16;
6910 int i;
6911
6912 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6913 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6914 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6915 val16, val32);
6916
6917 /* MAC block */
6918 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6919 tr32(MAC_MODE), tr32(MAC_STATUS));
6920 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6921 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6922 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6923 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6924 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6925 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6926
6927 /* Send data initiator control block */
6928 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6929 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6930 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6931 tr32(SNDDATAI_STATSCTRL));
6932
6933 /* Send data completion control block */
6934 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6935
6936 /* Send BD ring selector block */
6937 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6938 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6939
6940 /* Send BD initiator control block */
6941 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6942 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6943
6944 /* Send BD completion control block */
6945 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6946
6947 /* Receive list placement control block */
6948 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6949 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6950 printk(" RCVLPC_STATSCTRL[%08x]\n",
6951 tr32(RCVLPC_STATSCTRL));
6952
6953 /* Receive data and receive BD initiator control block */
6954 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6955 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6956
6957 /* Receive data completion control block */
6958 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6959 tr32(RCVDCC_MODE));
6960
6961 /* Receive BD initiator control block */
6962 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6963 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6964
6965 /* Receive BD completion control block */
6966 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6967 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6968
6969 /* Receive list selector control block */
6970 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6971 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6972
6973 /* Mbuf cluster free block */
6974 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6975 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6976
6977 /* Host coalescing control block */
6978 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6979 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6980 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6981 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6982 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6983 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6984 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6985 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6986 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6987 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6988 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6989 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6990
6991 /* Memory arbiter control block */
6992 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6993 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6994
6995 /* Buffer manager control block */
6996 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6997 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6998 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6999 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7000 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7001 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7002 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7003 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7004
7005 /* Read DMA control block */
7006 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7007 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7008
7009 /* Write DMA control block */
7010 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7011 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7012
7013 /* DMA completion block */
7014 printk("DEBUG: DMAC_MODE[%08x]\n",
7015 tr32(DMAC_MODE));
7016
7017 /* GRC block */
7018 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7019 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7020 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7021 tr32(GRC_LOCAL_CTRL));
7022
7023 /* TG3_BDINFOs */
7024 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7025 tr32(RCVDBDI_JUMBO_BD + 0x0),
7026 tr32(RCVDBDI_JUMBO_BD + 0x4),
7027 tr32(RCVDBDI_JUMBO_BD + 0x8),
7028 tr32(RCVDBDI_JUMBO_BD + 0xc));
7029 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7030 tr32(RCVDBDI_STD_BD + 0x0),
7031 tr32(RCVDBDI_STD_BD + 0x4),
7032 tr32(RCVDBDI_STD_BD + 0x8),
7033 tr32(RCVDBDI_STD_BD + 0xc));
7034 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7035 tr32(RCVDBDI_MINI_BD + 0x0),
7036 tr32(RCVDBDI_MINI_BD + 0x4),
7037 tr32(RCVDBDI_MINI_BD + 0x8),
7038 tr32(RCVDBDI_MINI_BD + 0xc));
7039
7040 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7041 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7042 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7043 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7044 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7045 val32, val32_2, val32_3, val32_4);
7046
7047 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7048 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7049 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7050 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7051 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7052 val32, val32_2, val32_3, val32_4);
7053
7054 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7055 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7056 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7057 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7058 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7059 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7060 val32, val32_2, val32_3, val32_4, val32_5);
7061
7062 /* SW status block */
7063 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7064 tp->hw_status->status,
7065 tp->hw_status->status_tag,
7066 tp->hw_status->rx_jumbo_consumer,
7067 tp->hw_status->rx_consumer,
7068 tp->hw_status->rx_mini_consumer,
7069 tp->hw_status->idx[0].rx_producer,
7070 tp->hw_status->idx[0].tx_consumer);
7071
7072 /* SW statistics block */
7073 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7074 ((u32 *)tp->hw_stats)[0],
7075 ((u32 *)tp->hw_stats)[1],
7076 ((u32 *)tp->hw_stats)[2],
7077 ((u32 *)tp->hw_stats)[3]);
7078
7079 /* Mailboxes */
7080 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07007081 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7082 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7083 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7084 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007085
7086 /* NIC side send descriptors. */
7087 for (i = 0; i < 6; i++) {
7088 unsigned long txd;
7089
7090 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7091 + (i * sizeof(struct tg3_tx_buffer_desc));
7092 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7093 i,
7094 readl(txd + 0x0), readl(txd + 0x4),
7095 readl(txd + 0x8), readl(txd + 0xc));
7096 }
7097
7098 /* NIC side RX descriptors. */
7099 for (i = 0; i < 6; i++) {
7100 unsigned long rxd;
7101
7102 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7103 + (i * sizeof(struct tg3_rx_buffer_desc));
7104 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7105 i,
7106 readl(rxd + 0x0), readl(rxd + 0x4),
7107 readl(rxd + 0x8), readl(rxd + 0xc));
7108 rxd += (4 * sizeof(u32));
7109 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7110 i,
7111 readl(rxd + 0x0), readl(rxd + 0x4),
7112 readl(rxd + 0x8), readl(rxd + 0xc));
7113 }
7114
7115 for (i = 0; i < 6; i++) {
7116 unsigned long rxd;
7117
7118 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7119 + (i * sizeof(struct tg3_rx_buffer_desc));
7120 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7121 i,
7122 readl(rxd + 0x0), readl(rxd + 0x4),
7123 readl(rxd + 0x8), readl(rxd + 0xc));
7124 rxd += (4 * sizeof(u32));
7125 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7126 i,
7127 readl(rxd + 0x0), readl(rxd + 0x4),
7128 readl(rxd + 0x8), readl(rxd + 0xc));
7129 }
7130}
7131#endif
7132
7133static struct net_device_stats *tg3_get_stats(struct net_device *);
7134static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7135
7136static int tg3_close(struct net_device *dev)
7137{
7138 struct tg3 *tp = netdev_priv(dev);
7139
Michael Chan7faa0062006-02-02 17:29:28 -08007140 /* Calling flush_scheduled_work() may deadlock because
7141 * linkwatch_event() may be on the workqueue and it will try to get
7142 * the rtnl_lock which we are holding.
7143 */
7144 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7145 msleep(1);
7146
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147 netif_stop_queue(dev);
7148
7149 del_timer_sync(&tp->timer);
7150
David S. Millerf47c11e2005-06-24 20:18:35 -07007151 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152#if 0
7153 tg3_dump_state(tp);
7154#endif
7155
7156 tg3_disable_ints(tp);
7157
7158	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7159	tg3_free_rings(tp);
7160	tp->tg3_flags &=
7161		~(TG3_FLAG_INIT_COMPLETE |
7162		  TG3_FLAG_GOT_SERDES_FLOWCTL);
7163
7164	tg3_full_unlock(tp);
7165
7166	free_irq(tp->pdev->irq, dev);
7167 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7168 pci_disable_msi(tp->pdev);
7169 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171
7172 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7173 sizeof(tp->net_stats_prev));
7174 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7175 sizeof(tp->estats_prev));
7176
7177 tg3_free_consistent(tp);
7178
Michael Chanbc1c7562006-03-20 17:48:03 -08007179 tg3_set_power_state(tp, PCI_D3hot);
7180
7181 netif_carrier_off(tp->dev);
7182
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183 return 0;
7184}
7185
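/* Collapse a 64-bit hardware statistic into an unsigned long; 32-bit
 * hosts can only report the low word.
 */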
7186static inline unsigned long get_stat64(tg3_stat64_t *val)
7187{
7188 unsigned long ret;
7189
7190#if (BITS_PER_LONG == 32)
7191 ret = val->low;
7192#else
7193 ret = ((u64)val->high << 32) | ((u64)val->low);
7194#endif
7195 return ret;
7196}
7197
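/* 5700/5701 boards with a copper PHY expose the CRC error count through
 * PHY registers 0x1e/0x14 instead of the MAC statistics block, so read
 * and accumulate it here under tp->lock.
 */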
7198static unsigned long calc_crc_errors(struct tg3 *tp)
7199{
7200 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7201
7202 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7203 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007205 u32 val;
7206
David S. Millerf47c11e2005-06-24 20:18:35 -07007207 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208 if (!tg3_readphy(tp, 0x1e, &val)) {
7209 tg3_writephy(tp, 0x1e, val | 0x8000);
7210 tg3_readphy(tp, 0x14, &val);
7211 } else
7212 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07007213 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007214
7215 tp->phy_crc_errors += val;
7216
7217 return tp->phy_crc_errors;
7218 }
7219
7220 return get_stat64(&hw_stats->rx_fcs_errors);
7221}
7222
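/* Each ethtool statistic is the snapshot saved at the last close plus the
 * live hardware counter, keeping the totals monotonic across chip resets.
 */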
7223#define ESTAT_ADD(member) \
7224 estats->member = old_estats->member + \
7225 get_stat64(&hw_stats->member)
7226
7227static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7228{
7229 struct tg3_ethtool_stats *estats = &tp->estats;
7230 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7231 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7232
7233 if (!hw_stats)
7234 return old_estats;
7235
7236 ESTAT_ADD(rx_octets);
7237 ESTAT_ADD(rx_fragments);
7238 ESTAT_ADD(rx_ucast_packets);
7239 ESTAT_ADD(rx_mcast_packets);
7240 ESTAT_ADD(rx_bcast_packets);
7241 ESTAT_ADD(rx_fcs_errors);
7242 ESTAT_ADD(rx_align_errors);
7243 ESTAT_ADD(rx_xon_pause_rcvd);
7244 ESTAT_ADD(rx_xoff_pause_rcvd);
7245 ESTAT_ADD(rx_mac_ctrl_rcvd);
7246 ESTAT_ADD(rx_xoff_entered);
7247 ESTAT_ADD(rx_frame_too_long_errors);
7248 ESTAT_ADD(rx_jabbers);
7249 ESTAT_ADD(rx_undersize_packets);
7250 ESTAT_ADD(rx_in_length_errors);
7251 ESTAT_ADD(rx_out_length_errors);
7252 ESTAT_ADD(rx_64_or_less_octet_packets);
7253 ESTAT_ADD(rx_65_to_127_octet_packets);
7254 ESTAT_ADD(rx_128_to_255_octet_packets);
7255 ESTAT_ADD(rx_256_to_511_octet_packets);
7256 ESTAT_ADD(rx_512_to_1023_octet_packets);
7257 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7258 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7259 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7260 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7261 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7262
7263 ESTAT_ADD(tx_octets);
7264 ESTAT_ADD(tx_collisions);
7265 ESTAT_ADD(tx_xon_sent);
7266 ESTAT_ADD(tx_xoff_sent);
7267 ESTAT_ADD(tx_flow_control);
7268 ESTAT_ADD(tx_mac_errors);
7269 ESTAT_ADD(tx_single_collisions);
7270 ESTAT_ADD(tx_mult_collisions);
7271 ESTAT_ADD(tx_deferred);
7272 ESTAT_ADD(tx_excessive_collisions);
7273 ESTAT_ADD(tx_late_collisions);
7274 ESTAT_ADD(tx_collide_2times);
7275 ESTAT_ADD(tx_collide_3times);
7276 ESTAT_ADD(tx_collide_4times);
7277 ESTAT_ADD(tx_collide_5times);
7278 ESTAT_ADD(tx_collide_6times);
7279 ESTAT_ADD(tx_collide_7times);
7280 ESTAT_ADD(tx_collide_8times);
7281 ESTAT_ADD(tx_collide_9times);
7282 ESTAT_ADD(tx_collide_10times);
7283 ESTAT_ADD(tx_collide_11times);
7284 ESTAT_ADD(tx_collide_12times);
7285 ESTAT_ADD(tx_collide_13times);
7286 ESTAT_ADD(tx_collide_14times);
7287 ESTAT_ADD(tx_collide_15times);
7288 ESTAT_ADD(tx_ucast_packets);
7289 ESTAT_ADD(tx_mcast_packets);
7290 ESTAT_ADD(tx_bcast_packets);
7291 ESTAT_ADD(tx_carrier_sense_errors);
7292 ESTAT_ADD(tx_discards);
7293 ESTAT_ADD(tx_errors);
7294
7295 ESTAT_ADD(dma_writeq_full);
7296 ESTAT_ADD(dma_write_prioq_full);
7297 ESTAT_ADD(rxbds_empty);
7298 ESTAT_ADD(rx_discards);
7299 ESTAT_ADD(rx_errors);
7300 ESTAT_ADD(rx_threshold_hit);
7301
7302 ESTAT_ADD(dma_readq_full);
7303 ESTAT_ADD(dma_read_prioq_full);
7304 ESTAT_ADD(tx_comp_queue_full);
7305
7306 ESTAT_ADD(ring_set_send_prod_index);
7307 ESTAT_ADD(ring_status_update);
7308 ESTAT_ADD(nic_irqs);
7309 ESTAT_ADD(nic_avoided_irqs);
7310 ESTAT_ADD(nic_tx_threshold_hit);
7311
7312 return estats;
7313}
7314
7315static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7316{
7317 struct tg3 *tp = netdev_priv(dev);
7318 struct net_device_stats *stats = &tp->net_stats;
7319 struct net_device_stats *old_stats = &tp->net_stats_prev;
7320 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7321
7322 if (!hw_stats)
7323 return old_stats;
7324
7325 stats->rx_packets = old_stats->rx_packets +
7326 get_stat64(&hw_stats->rx_ucast_packets) +
7327 get_stat64(&hw_stats->rx_mcast_packets) +
7328 get_stat64(&hw_stats->rx_bcast_packets);
7329
7330 stats->tx_packets = old_stats->tx_packets +
7331 get_stat64(&hw_stats->tx_ucast_packets) +
7332 get_stat64(&hw_stats->tx_mcast_packets) +
7333 get_stat64(&hw_stats->tx_bcast_packets);
7334
7335 stats->rx_bytes = old_stats->rx_bytes +
7336 get_stat64(&hw_stats->rx_octets);
7337 stats->tx_bytes = old_stats->tx_bytes +
7338 get_stat64(&hw_stats->tx_octets);
7339
7340 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07007341 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007342 stats->tx_errors = old_stats->tx_errors +
7343 get_stat64(&hw_stats->tx_errors) +
7344 get_stat64(&hw_stats->tx_mac_errors) +
7345 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7346 get_stat64(&hw_stats->tx_discards);
7347
7348 stats->multicast = old_stats->multicast +
7349 get_stat64(&hw_stats->rx_mcast_packets);
7350 stats->collisions = old_stats->collisions +
7351 get_stat64(&hw_stats->tx_collisions);
7352
7353 stats->rx_length_errors = old_stats->rx_length_errors +
7354 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7355 get_stat64(&hw_stats->rx_undersize_packets);
7356
7357 stats->rx_over_errors = old_stats->rx_over_errors +
7358 get_stat64(&hw_stats->rxbds_empty);
7359 stats->rx_frame_errors = old_stats->rx_frame_errors +
7360 get_stat64(&hw_stats->rx_align_errors);
7361 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7362 get_stat64(&hw_stats->tx_discards);
7363 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7364 get_stat64(&hw_stats->tx_carrier_sense_errors);
7365
7366 stats->rx_crc_errors = old_stats->rx_crc_errors +
7367 calc_crc_errors(tp);
7368
John W. Linville4f63b872005-09-12 14:43:18 -07007369 stats->rx_missed_errors = old_stats->rx_missed_errors +
7370 get_stat64(&hw_stats->rx_discards);
7371
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372 return stats;
7373}
7374
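/* Bit-reflected CRC-32 (polynomial 0xEDB88320) over buf; __tg3_set_rx_mode()
 * uses the low 7 bits of the (inverted) result to pick a bit in the 128-bit
 * multicast hash filter.
 */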
7375static inline u32 calc_crc(unsigned char *buf, int len)
7376{
7377 u32 reg;
7378 u32 tmp;
7379 int j, k;
7380
7381 reg = 0xffffffff;
7382
7383 for (j = 0; j < len; j++) {
7384 reg ^= buf[j];
7385
7386 for (k = 0; k < 8; k++) {
7387 tmp = reg & 0x01;
7388
7389 reg >>= 1;
7390
7391 if (tmp) {
7392 reg ^= 0xedb88320;
7393 }
7394 }
7395 }
7396
7397 return ~reg;
7398}
7399
7400static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7401{
7402 /* accept or reject all multicast frames */
7403 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7404 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7405 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7406 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7407}
7408
7409static void __tg3_set_rx_mode(struct net_device *dev)
7410{
7411 struct tg3 *tp = netdev_priv(dev);
7412 u32 rx_mode;
7413
7414 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7415 RX_MODE_KEEP_VLAN_TAG);
7416
7417 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7418 * flag clear.
7419 */
7420#if TG3_VLAN_TAG_USED
7421 if (!tp->vlgrp &&
7422 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7423 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7424#else
7425 /* By definition, VLAN is disabled always in this
7426 * case.
7427 */
7428 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7429 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7430#endif
7431
7432 if (dev->flags & IFF_PROMISC) {
7433 /* Promiscuous mode. */
7434 rx_mode |= RX_MODE_PROMISC;
7435 } else if (dev->flags & IFF_ALLMULTI) {
7436 /* Accept all multicast. */
7437 tg3_set_multi (tp, 1);
7438 } else if (dev->mc_count < 1) {
7439 /* Reject all multicast. */
7440 tg3_set_multi (tp, 0);
7441 } else {
7442 /* Accept one or more multicast(s). */
7443 struct dev_mc_list *mclist;
7444 unsigned int i;
7445 u32 mc_filter[4] = { 0, };
7446 u32 regidx;
7447 u32 bit;
7448 u32 crc;
7449
7450 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7451 i++, mclist = mclist->next) {
7452
7453 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7454 bit = ~crc & 0x7f;
7455 regidx = (bit & 0x60) >> 5;
7456 bit &= 0x1f;
7457 mc_filter[regidx] |= (1 << bit);
7458 }
7459
7460 tw32(MAC_HASH_REG_0, mc_filter[0]);
7461 tw32(MAC_HASH_REG_1, mc_filter[1]);
7462 tw32(MAC_HASH_REG_2, mc_filter[2]);
7463 tw32(MAC_HASH_REG_3, mc_filter[3]);
7464 }
7465
7466 if (rx_mode != tp->rx_mode) {
7467 tp->rx_mode = rx_mode;
7468 tw32_f(MAC_RX_MODE, rx_mode);
7469 udelay(10);
7470 }
7471}
7472
7473static void tg3_set_rx_mode(struct net_device *dev)
7474{
7475 struct tg3 *tp = netdev_priv(dev);
7476
7477	if (!netif_running(dev))
7478		return;
7479
7480	tg3_full_lock(tp, 0);
7481	__tg3_set_rx_mode(dev);
7482	tg3_full_unlock(tp);
7483}
7484
7485#define TG3_REGDUMP_LEN (32 * 1024)
7486
7487static int tg3_get_regs_len(struct net_device *dev)
7488{
7489 return TG3_REGDUMP_LEN;
7490}
7491
7492static void tg3_get_regs(struct net_device *dev,
7493 struct ethtool_regs *regs, void *_p)
7494{
7495 u32 *p = _p;
7496 struct tg3 *tp = netdev_priv(dev);
7497 u8 *orig_p = _p;
7498 int i;
7499
7500 regs->version = 0;
7501
7502 memset(p, 0, TG3_REGDUMP_LEN);
7503
Michael Chanbc1c7562006-03-20 17:48:03 -08007504 if (tp->link_config.phy_is_low_power)
7505 return;
7506
David S. Millerf47c11e2005-06-24 20:18:35 -07007507 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007508
7509#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7510#define GET_REG32_LOOP(base,len) \
7511do { p = (u32 *)(orig_p + (base)); \
7512 for (i = 0; i < len; i += 4) \
7513 __GET_REG32((base) + i); \
7514} while (0)
7515#define GET_REG32_1(reg) \
7516do { p = (u32 *)(orig_p + (reg)); \
7517 __GET_REG32((reg)); \
7518} while (0)
7519
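	/* Each register block is copied to its native offset inside the
	 * 32 KB dump buffer; gaps stay zeroed from the memset above.
	 */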
7520 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7521 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7522 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7523 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7524 GET_REG32_1(SNDDATAC_MODE);
7525 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7526 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7527 GET_REG32_1(SNDBDC_MODE);
7528 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7529 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7530 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7531 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7532 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7533 GET_REG32_1(RCVDCC_MODE);
7534 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7535 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7536 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7537 GET_REG32_1(MBFREE_MODE);
7538 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7539 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7540 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7541 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7542 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08007543 GET_REG32_1(RX_CPU_MODE);
7544 GET_REG32_1(RX_CPU_STATE);
7545 GET_REG32_1(RX_CPU_PGMCTR);
7546 GET_REG32_1(RX_CPU_HWBKPT);
7547 GET_REG32_1(TX_CPU_MODE);
7548 GET_REG32_1(TX_CPU_STATE);
7549 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7551 GET_REG32_LOOP(FTQ_RESET, 0x120);
7552 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7553 GET_REG32_1(DMAC_MODE);
7554 GET_REG32_LOOP(GRC_MODE, 0x4c);
7555 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7556 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7557
7558#undef __GET_REG32
7559#undef GET_REG32_LOOP
7560#undef GET_REG32_1
7561
David S. Millerf47c11e2005-06-24 20:18:35 -07007562 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007563}
7564
7565static int tg3_get_eeprom_len(struct net_device *dev)
7566{
7567 struct tg3 *tp = netdev_priv(dev);
7568
7569 return tp->nvram_size;
7570}
7571
7572static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Michael Chan18201802006-03-20 22:29:15 -08007573static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574
7575static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7576{
7577 struct tg3 *tp = netdev_priv(dev);
7578 int ret;
7579 u8 *pd;
7580 u32 i, offset, len, val, b_offset, b_count;
7581
Michael Chanbc1c7562006-03-20 17:48:03 -08007582 if (tp->link_config.phy_is_low_power)
7583 return -EAGAIN;
7584
Linus Torvalds1da177e2005-04-16 15:20:36 -07007585 offset = eeprom->offset;
7586 len = eeprom->len;
7587 eeprom->len = 0;
7588
7589 eeprom->magic = TG3_EEPROM_MAGIC;
7590
7591 if (offset & 3) {
7592 /* adjustments to start on required 4 byte boundary */
7593 b_offset = offset & 3;
7594 b_count = 4 - b_offset;
7595 if (b_count > len) {
7596 /* i.e. offset=1 len=2 */
7597 b_count = len;
7598 }
7599 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7600 if (ret)
7601 return ret;
7602 val = cpu_to_le32(val);
7603 memcpy(data, ((char*)&val) + b_offset, b_count);
7604 len -= b_count;
7605 offset += b_count;
7606 eeprom->len += b_count;
7607 }
7608
7609	/* read bytes up to the last 4 byte boundary */
7610 pd = &data[eeprom->len];
7611 for (i = 0; i < (len - (len & 3)); i += 4) {
7612 ret = tg3_nvram_read(tp, offset + i, &val);
7613 if (ret) {
7614 eeprom->len += i;
7615 return ret;
7616 }
7617 val = cpu_to_le32(val);
7618 memcpy(pd + i, &val, 4);
7619 }
7620 eeprom->len += i;
7621
7622 if (len & 3) {
7623 /* read last bytes not ending on 4 byte boundary */
7624 pd = &data[eeprom->len];
7625 b_count = len & 3;
7626 b_offset = offset + len - b_count;
7627 ret = tg3_nvram_read(tp, b_offset, &val);
7628 if (ret)
7629 return ret;
7630 val = cpu_to_le32(val);
7631 memcpy(pd, ((char*)&val), b_count);
7632 eeprom->len += b_count;
7633 }
7634 return 0;
7635}
7636
7637static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7638
7639static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7640{
7641 struct tg3 *tp = netdev_priv(dev);
7642 int ret;
7643 u32 offset, len, b_offset, odd_len, start, end;
7644 u8 *buf;
7645
Michael Chanbc1c7562006-03-20 17:48:03 -08007646 if (tp->link_config.phy_is_low_power)
7647 return -EAGAIN;
7648
Linus Torvalds1da177e2005-04-16 15:20:36 -07007649 if (eeprom->magic != TG3_EEPROM_MAGIC)
7650 return -EINVAL;
7651
7652 offset = eeprom->offset;
7653 len = eeprom->len;
7654
7655 if ((b_offset = (offset & 3))) {
7656 /* adjustments to start on required 4 byte boundary */
7657 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7658 if (ret)
7659 return ret;
7660 start = cpu_to_le32(start);
7661 len += b_offset;
7662 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07007663 if (len < 4)
7664 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007665 }
7666
7667 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07007668 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007669 /* adjustments to end on required 4 byte boundary */
7670 odd_len = 1;
7671 len = (len + 3) & ~3;
7672 ret = tg3_nvram_read(tp, offset+len-4, &end);
7673 if (ret)
7674 return ret;
7675 end = cpu_to_le32(end);
7676 }
7677
7678 buf = data;
7679 if (b_offset || odd_len) {
7680 buf = kmalloc(len, GFP_KERNEL);
7681		if (buf == NULL)
7682 return -ENOMEM;
7683 if (b_offset)
7684 memcpy(buf, &start, 4);
7685 if (odd_len)
7686 memcpy(buf+len-4, &end, 4);
7687 memcpy(buf + b_offset, data, eeprom->len);
7688 }
7689
7690 ret = tg3_nvram_write_block(tp, offset, len, buf);
7691
7692 if (buf != data)
7693 kfree(buf);
7694
7695 return ret;
7696}
7697
7698static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7699{
7700 struct tg3 *tp = netdev_priv(dev);
7701
7702 cmd->supported = (SUPPORTED_Autoneg);
7703
7704 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7705 cmd->supported |= (SUPPORTED_1000baseT_Half |
7706 SUPPORTED_1000baseT_Full);
7707
Karsten Keilef348142006-05-12 12:49:08 -07007708 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709 cmd->supported |= (SUPPORTED_100baseT_Half |
7710 SUPPORTED_100baseT_Full |
7711 SUPPORTED_10baseT_Half |
7712 SUPPORTED_10baseT_Full |
7713 SUPPORTED_MII);
Karsten Keilef348142006-05-12 12:49:08 -07007714 cmd->port = PORT_TP;
7715 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07007717 cmd->port = PORT_FIBRE;
7718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007719
7720 cmd->advertising = tp->link_config.advertising;
7721 if (netif_running(dev)) {
7722 cmd->speed = tp->link_config.active_speed;
7723 cmd->duplex = tp->link_config.active_duplex;
7724 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007725 cmd->phy_address = PHY_ADDR;
7726 cmd->transceiver = 0;
7727 cmd->autoneg = tp->link_config.autoneg;
7728 cmd->maxtxpkt = 0;
7729 cmd->maxrxpkt = 0;
7730 return 0;
7731}
7732
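/* Validate and apply link settings from ethtool.  SerDes devices may
 * only advertise or force 1000 Mb/s; copper devices may not force
 * 1000 Mb/s, and 10/100-only chips reject a 1000 Mb/s setting
 * altogether.  The new configuration is applied immediately through
 * tg3_setup_phy() when the interface is up.
 */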
7733static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7734{
7735 struct tg3 *tp = netdev_priv(dev);
7736
Michael Chan37ff2382005-10-26 15:49:51 -07007737 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738 /* These are the only valid advertisement bits allowed. */
7739 if (cmd->autoneg == AUTONEG_ENABLE &&
7740 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7741 ADVERTISED_1000baseT_Full |
7742 ADVERTISED_Autoneg |
7743 ADVERTISED_FIBRE)))
7744 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07007745 /* Fiber can only do SPEED_1000. */
7746 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7747 (cmd->speed != SPEED_1000))
7748 return -EINVAL;
7749 /* Copper cannot force SPEED_1000. */
7750 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7751 (cmd->speed == SPEED_1000))
7752 return -EINVAL;
7753 else if ((cmd->speed == SPEED_1000) &&
7754		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7755 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007756
David S. Millerf47c11e2005-06-24 20:18:35 -07007757 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007758
7759 tp->link_config.autoneg = cmd->autoneg;
7760 if (cmd->autoneg == AUTONEG_ENABLE) {
7761 tp->link_config.advertising = cmd->advertising;
7762 tp->link_config.speed = SPEED_INVALID;
7763 tp->link_config.duplex = DUPLEX_INVALID;
7764 } else {
7765 tp->link_config.advertising = 0;
7766 tp->link_config.speed = cmd->speed;
7767 tp->link_config.duplex = cmd->duplex;
7768 }
7769
7770 if (netif_running(dev))
7771 tg3_setup_phy(tp, 1);
7772
David S. Millerf47c11e2005-06-24 20:18:35 -07007773 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007774
7775 return 0;
7776}
7777
7778static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7779{
7780 struct tg3 *tp = netdev_priv(dev);
7781
7782 strcpy(info->driver, DRV_MODULE_NAME);
7783 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08007784 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007785 strcpy(info->bus_info, pci_name(tp->pdev));
7786}
7787
7788static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7789{
7790 struct tg3 *tp = netdev_priv(dev);
7791
7792 wol->supported = WAKE_MAGIC;
7793 wol->wolopts = 0;
7794 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7795 wol->wolopts = WAKE_MAGIC;
7796 memset(&wol->sopass, 0, sizeof(wol->sopass));
7797}
7798
7799static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7800{
7801 struct tg3 *tp = netdev_priv(dev);
7802
7803 if (wol->wolopts & ~WAKE_MAGIC)
7804 return -EINVAL;
7805 if ((wol->wolopts & WAKE_MAGIC) &&
7806 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7807 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7808 return -EINVAL;
7809
David S. Millerf47c11e2005-06-24 20:18:35 -07007810 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007811 if (wol->wolopts & WAKE_MAGIC)
7812 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7813 else
7814 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07007815 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816
7817 return 0;
7818}
7819
7820static u32 tg3_get_msglevel(struct net_device *dev)
7821{
7822 struct tg3 *tp = netdev_priv(dev);
7823 return tp->msg_enable;
7824}
7825
7826static void tg3_set_msglevel(struct net_device *dev, u32 value)
7827{
7828 struct tg3 *tp = netdev_priv(dev);
7829 tp->msg_enable = value;
7830}
7831
7832#if TG3_TSO_SUPPORT != 0
7833static int tg3_set_tso(struct net_device *dev, u32 value)
7834{
7835 struct tg3 *tp = netdev_priv(dev);
7836
7837 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7838 if (value)
7839 return -EINVAL;
7840 return 0;
7841 }
7842 return ethtool_op_set_tso(dev, value);
7843}
7844#endif
7845
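/* "ethtool -r": restart autonegotiation.  Not supported on SerDes
 * parts; on copper it only succeeds when autoneg (or parallel detect)
 * is already active, and the restart is done by setting BMCR_ANRESTART
 * together with BMCR_ANENABLE in the PHY.
 */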
7846static int tg3_nway_reset(struct net_device *dev)
7847{
7848 struct tg3 *tp = netdev_priv(dev);
7849 u32 bmcr;
7850 int r;
7851
7852 if (!netif_running(dev))
7853 return -EAGAIN;
7854
Michael Chanc94e3942005-09-27 12:12:42 -07007855 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7856 return -EINVAL;
7857
David S. Millerf47c11e2005-06-24 20:18:35 -07007858 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007859 r = -EINVAL;
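	/* BMCR is read twice here; the first read appears to act as a
	 * dummy read so that the checked read below picks up a fresh value.
	 */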
7860 tg3_readphy(tp, MII_BMCR, &bmcr);
7861 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
Michael Chanc94e3942005-09-27 12:12:42 -07007862 ((bmcr & BMCR_ANENABLE) ||
7863 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7864 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7865 BMCR_ANENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007866 r = 0;
7867 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007868 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869
7870 return r;
7871}
7872
7873static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7874{
7875 struct tg3 *tp = netdev_priv(dev);
7876
7877 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7878 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08007879 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7880 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7881 else
7882 ering->rx_jumbo_max_pending = 0;
7883
7884 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007885
7886 ering->rx_pending = tp->rx_pending;
7887 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08007888 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7889 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7890 else
7891 ering->rx_jumbo_pending = 0;
7892
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893 ering->tx_pending = tp->tx_pending;
7894}
7895
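/* "ethtool -G": resize the standard rx, jumbo rx and tx rings.  The
 * requested sizes are bounds-checked against the fixed hardware ring
 * sizes, chips limited to 64 pending standard rx buffers are clamped
 * to 63, and if the interface is running the chip is halted and
 * reinitialized with the new values.
 */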
7896static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7897{
7898 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007899 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900
7901 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7902 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7903 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7904 return -EINVAL;
7905
Michael Chanbbe832c2005-06-24 20:20:04 -07007906 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007908 irq_sync = 1;
7909 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007910
Michael Chanbbe832c2005-06-24 20:20:04 -07007911 tg3_full_lock(tp, irq_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007912
7913 tp->rx_pending = ering->rx_pending;
7914
7915 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7916 tp->rx_pending > 63)
7917 tp->rx_pending = 63;
7918 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7919 tp->tx_pending = ering->tx_pending;
7920
7921 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007922 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007923 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007924 tg3_netif_start(tp);
7925 }
7926
David S. Millerf47c11e2005-06-24 20:18:35 -07007927 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007928
7929 return 0;
7930}
7931
7932static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7933{
7934 struct tg3 *tp = netdev_priv(dev);
7935
7936 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7937 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7938 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7939}
7940
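/* "ethtool -A": update flow-control settings.  The pause flags are
 * recorded in tp->tg3_flags; when the interface is up the chip is
 * halted and reinitialized so that the new pause configuration is
 * programmed into the MAC.
 */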
7941static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7942{
7943 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007944 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945
Michael Chanbbe832c2005-06-24 20:20:04 -07007946 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007948 irq_sync = 1;
7949 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950
Michael Chanbbe832c2005-06-24 20:20:04 -07007951 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07007952
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953 if (epause->autoneg)
7954 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7955 else
7956 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7957 if (epause->rx_pause)
7958 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7959 else
7960 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7961 if (epause->tx_pause)
7962 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7963 else
7964 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7965
7966 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007967 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007968 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007969 tg3_netif_start(tp);
7970 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007971
7972 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007973
7974 return 0;
7975}
7976
7977static u32 tg3_get_rx_csum(struct net_device *dev)
7978{
7979 struct tg3 *tp = netdev_priv(dev);
7980 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7981}
7982
7983static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7984{
7985 struct tg3 *tp = netdev_priv(dev);
7986
7987 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7988 if (data != 0)
7989 return -EINVAL;
7990 return 0;
7991 }
7992
David S. Millerf47c11e2005-06-24 20:18:35 -07007993 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007994 if (data)
7995 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7996 else
7997 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07007998 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007999
8000 return 0;
8001}
8002
8003static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8004{
8005 struct tg3 *tp = netdev_priv(dev);
8006
8007 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8008 if (data != 0)
8009 return -EINVAL;
8010 return 0;
8011 }
8012
Michael Chanaf36e6b2006-03-23 01:28:06 -08008013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan9c27dbd2006-03-20 22:28:27 -08008015 ethtool_op_set_tx_hw_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008016 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08008017 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018
8019 return 0;
8020}
8021
8022static int tg3_get_stats_count (struct net_device *dev)
8023{
8024 return TG3_NUM_STATS;
8025}
8026
Michael Chan4cafd3f2005-05-29 14:56:34 -07008027static int tg3_get_test_count (struct net_device *dev)
8028{
8029 return TG3_NUM_TEST;
8030}
8031
Linus Torvalds1da177e2005-04-16 15:20:36 -07008032static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8033{
8034 switch (stringset) {
8035 case ETH_SS_STATS:
8036 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8037 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07008038 case ETH_SS_TEST:
8039 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8040 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008041 default:
8042 WARN_ON(1); /* we need a WARN() */
8043 break;
8044 }
8045}
8046
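/* "ethtool -p": identify the port by blinking the LEDs.  The LED
 * control register is toggled every half second between an all-on
 * override pattern and link-override only, for the requested number of
 * seconds (default 2), and is then restored from tp->led_ctrl.
 */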
Michael Chan4009a932005-09-05 17:52:54 -07008047static int tg3_phys_id(struct net_device *dev, u32 data)
8048{
8049 struct tg3 *tp = netdev_priv(dev);
8050 int i;
8051
8052 if (!netif_running(tp->dev))
8053 return -EAGAIN;
8054
8055 if (data == 0)
8056 data = 2;
8057
8058 for (i = 0; i < (data * 2); i++) {
8059 if ((i % 2) == 0)
8060 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8061 LED_CTRL_1000MBPS_ON |
8062 LED_CTRL_100MBPS_ON |
8063 LED_CTRL_10MBPS_ON |
8064 LED_CTRL_TRAFFIC_OVERRIDE |
8065 LED_CTRL_TRAFFIC_BLINK |
8066 LED_CTRL_TRAFFIC_LED);
8067
8068 else
8069 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8070 LED_CTRL_TRAFFIC_OVERRIDE);
8071
8072 if (msleep_interruptible(500))
8073 break;
8074 }
8075 tw32(MAC_LED_CTRL, tp->led_ctrl);
8076 return 0;
8077}
8078
Linus Torvalds1da177e2005-04-16 15:20:36 -07008079static void tg3_get_ethtool_stats (struct net_device *dev,
8080 struct ethtool_stats *estats, u64 *tmp_stats)
8081{
8082 struct tg3 *tp = netdev_priv(dev);
8083 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8084}
8085
Michael Chan566f86a2005-05-29 14:56:58 -07008086#define NVRAM_TEST_SIZE 0x100
Michael Chan1b277772006-03-20 22:27:48 -08008087#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
Michael Chan566f86a2005-05-29 14:56:58 -07008088
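/* Self-test #1: verify the NVRAM contents.  Legacy images are checked
 * with CRCs: the bootstrap header checksum lives at offset 0x10 and the
 * manufacturing block (starting at 0x74) is checksummed at 0xfc.
 * Selfboot format-1 images instead use a simple byte-wise sum that must
 * come out to zero.
 */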
8089static int tg3_test_nvram(struct tg3 *tp)
8090{
Michael Chan1b277772006-03-20 22:27:48 -08008091 u32 *buf, csum, magic;
8092 int i, j, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07008093
Michael Chan18201802006-03-20 22:29:15 -08008094 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008095 return -EIO;
8096
Michael Chan1b277772006-03-20 22:27:48 -08008097 if (magic == TG3_EEPROM_MAGIC)
8098 size = NVRAM_TEST_SIZE;
8099 else if ((magic & 0xff000000) == 0xa5000000) {
8100 if ((magic & 0xe00000) == 0x200000)
8101 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8102 else
8103 return 0;
8104 } else
8105 return -EIO;
8106
8107 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07008108 if (buf == NULL)
8109 return -ENOMEM;
8110
Michael Chan1b277772006-03-20 22:27:48 -08008111 err = -EIO;
8112 for (i = 0, j = 0; i < size; i += 4, j++) {
Michael Chan566f86a2005-05-29 14:56:58 -07008113 u32 val;
8114
8115 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8116 break;
8117 buf[j] = cpu_to_le32(val);
8118 }
Michael Chan1b277772006-03-20 22:27:48 -08008119 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07008120 goto out;
8121
Michael Chan1b277772006-03-20 22:27:48 -08008122 /* Selfboot format */
8123 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8124 u8 *buf8 = (u8 *) buf, csum8 = 0;
8125
8126 for (i = 0; i < size; i++)
8127 csum8 += buf8[i];
8128
Adrian Bunkad96b482006-04-05 22:21:04 -07008129 if (csum8 == 0) {
8130 err = 0;
8131 goto out;
8132 }
8133
8134 err = -EIO;
8135 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08008136 }
Michael Chan566f86a2005-05-29 14:56:58 -07008137
8138 /* Bootstrap checksum at offset 0x10 */
8139 csum = calc_crc((unsigned char *) buf, 0x10);
8140	if (csum != cpu_to_le32(buf[0x10/4]))
8141 goto out;
8142
8143 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8144 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8145 if (csum != cpu_to_le32(buf[0xfc/4]))
8146 goto out;
8147
8148 err = 0;
8149
8150out:
8151 kfree(buf);
8152 return err;
8153}
8154
Michael Chanca430072005-05-29 14:57:23 -07008155#define TG3_SERDES_TIMEOUT_SEC 2
8156#define TG3_COPPER_TIMEOUT_SEC 6
8157
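/* Self-test #2: link test.  Simply polls netif_carrier_ok() once a
 * second, giving the link up to 2 seconds to come up on SerDes parts
 * and 6 seconds on copper.
 */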
8158static int tg3_test_link(struct tg3 *tp)
8159{
8160 int i, max;
8161
8162 if (!netif_running(tp->dev))
8163 return -ENODEV;
8164
Michael Chan4c987482005-09-05 17:52:38 -07008165 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07008166 max = TG3_SERDES_TIMEOUT_SEC;
8167 else
8168 max = TG3_COPPER_TIMEOUT_SEC;
8169
8170 for (i = 0; i < max; i++) {
8171 if (netif_carrier_ok(tp->dev))
8172 return 0;
8173
8174 if (msleep_interruptible(1000))
8175 break;
8176 }
8177
8178 return -EIO;
8179}
8180
Michael Chana71116d2005-05-29 14:58:11 -07008181/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08008182static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07008183{
8184 int i, is_5705;
8185 u32 offset, read_mask, write_mask, val, save_val, read_val;
8186 static struct {
8187 u16 offset;
8188 u16 flags;
8189#define TG3_FL_5705 0x1
8190#define TG3_FL_NOT_5705 0x2
8191#define TG3_FL_NOT_5788 0x4
8192 u32 read_mask;
8193 u32 write_mask;
8194 } reg_tbl[] = {
8195 /* MAC Control Registers */
8196 { MAC_MODE, TG3_FL_NOT_5705,
8197 0x00000000, 0x00ef6f8c },
8198 { MAC_MODE, TG3_FL_5705,
8199 0x00000000, 0x01ef6b8c },
8200 { MAC_STATUS, TG3_FL_NOT_5705,
8201 0x03800107, 0x00000000 },
8202 { MAC_STATUS, TG3_FL_5705,
8203 0x03800100, 0x00000000 },
8204 { MAC_ADDR_0_HIGH, 0x0000,
8205 0x00000000, 0x0000ffff },
8206 { MAC_ADDR_0_LOW, 0x0000,
8207 0x00000000, 0xffffffff },
8208 { MAC_RX_MTU_SIZE, 0x0000,
8209 0x00000000, 0x0000ffff },
8210 { MAC_TX_MODE, 0x0000,
8211 0x00000000, 0x00000070 },
8212 { MAC_TX_LENGTHS, 0x0000,
8213 0x00000000, 0x00003fff },
8214 { MAC_RX_MODE, TG3_FL_NOT_5705,
8215 0x00000000, 0x000007fc },
8216 { MAC_RX_MODE, TG3_FL_5705,
8217 0x00000000, 0x000007dc },
8218 { MAC_HASH_REG_0, 0x0000,
8219 0x00000000, 0xffffffff },
8220 { MAC_HASH_REG_1, 0x0000,
8221 0x00000000, 0xffffffff },
8222 { MAC_HASH_REG_2, 0x0000,
8223 0x00000000, 0xffffffff },
8224 { MAC_HASH_REG_3, 0x0000,
8225 0x00000000, 0xffffffff },
8226
8227 /* Receive Data and Receive BD Initiator Control Registers. */
8228 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8229 0x00000000, 0xffffffff },
8230 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8231 0x00000000, 0xffffffff },
8232 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8233 0x00000000, 0x00000003 },
8234 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8235 0x00000000, 0xffffffff },
8236 { RCVDBDI_STD_BD+0, 0x0000,
8237 0x00000000, 0xffffffff },
8238 { RCVDBDI_STD_BD+4, 0x0000,
8239 0x00000000, 0xffffffff },
8240 { RCVDBDI_STD_BD+8, 0x0000,
8241 0x00000000, 0xffff0002 },
8242 { RCVDBDI_STD_BD+0xc, 0x0000,
8243 0x00000000, 0xffffffff },
8244
8245 /* Receive BD Initiator Control Registers. */
8246 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8247 0x00000000, 0xffffffff },
8248 { RCVBDI_STD_THRESH, TG3_FL_5705,
8249 0x00000000, 0x000003ff },
8250 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8251 0x00000000, 0xffffffff },
8252
8253 /* Host Coalescing Control Registers. */
8254 { HOSTCC_MODE, TG3_FL_NOT_5705,
8255 0x00000000, 0x00000004 },
8256 { HOSTCC_MODE, TG3_FL_5705,
8257 0x00000000, 0x000000f6 },
8258 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8259 0x00000000, 0xffffffff },
8260 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8261 0x00000000, 0x000003ff },
8262 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8263 0x00000000, 0xffffffff },
8264 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8265 0x00000000, 0x000003ff },
8266 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8267 0x00000000, 0xffffffff },
8268 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8269 0x00000000, 0x000000ff },
8270 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8271 0x00000000, 0xffffffff },
8272 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8273 0x00000000, 0x000000ff },
8274 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8275 0x00000000, 0xffffffff },
8276 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8277 0x00000000, 0xffffffff },
8278 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8279 0x00000000, 0xffffffff },
8280 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8281 0x00000000, 0x000000ff },
8282 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8283 0x00000000, 0xffffffff },
8284 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8285 0x00000000, 0x000000ff },
8286 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8287 0x00000000, 0xffffffff },
8288 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8289 0x00000000, 0xffffffff },
8290 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8291 0x00000000, 0xffffffff },
8292 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8293 0x00000000, 0xffffffff },
8294 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8295 0x00000000, 0xffffffff },
8296 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8297 0xffffffff, 0x00000000 },
8298 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8299 0xffffffff, 0x00000000 },
8300
8301 /* Buffer Manager Control Registers. */
8302 { BUFMGR_MB_POOL_ADDR, 0x0000,
8303 0x00000000, 0x007fff80 },
8304 { BUFMGR_MB_POOL_SIZE, 0x0000,
8305 0x00000000, 0x007fffff },
8306 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8307 0x00000000, 0x0000003f },
8308 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8309 0x00000000, 0x000001ff },
8310 { BUFMGR_MB_HIGH_WATER, 0x0000,
8311 0x00000000, 0x000001ff },
8312 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8313 0xffffffff, 0x00000000 },
8314 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8315 0xffffffff, 0x00000000 },
8316
8317 /* Mailbox Registers */
8318 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8319 0x00000000, 0x000001ff },
8320 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8321 0x00000000, 0x000001ff },
8322 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8323 0x00000000, 0x000007ff },
8324 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8325 0x00000000, 0x000001ff },
8326
8327 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8328 };
8329
8330 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8331 is_5705 = 1;
8332 else
8333 is_5705 = 0;
8334
8335 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8336 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8337 continue;
8338
8339 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8340 continue;
8341
8342 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8343 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8344 continue;
8345
8346 offset = (u32) reg_tbl[i].offset;
8347 read_mask = reg_tbl[i].read_mask;
8348 write_mask = reg_tbl[i].write_mask;
8349
8350 /* Save the original register content */
8351 save_val = tr32(offset);
8352
8353 /* Determine the read-only value. */
8354 read_val = save_val & read_mask;
8355
8356 /* Write zero to the register, then make sure the read-only bits
8357 * are not changed and the read/write bits are all zeros.
8358 */
8359 tw32(offset, 0);
8360
8361 val = tr32(offset);
8362
8363 /* Test the read-only and read/write bits. */
8364 if (((val & read_mask) != read_val) || (val & write_mask))
8365 goto out;
8366
8367 /* Write ones to all the bits defined by RdMask and WrMask, then
8368 * make sure the read-only bits are not changed and the
8369 * read/write bits are all ones.
8370 */
8371 tw32(offset, read_mask | write_mask);
8372
8373 val = tr32(offset);
8374
8375 /* Test the read-only bits. */
8376 if ((val & read_mask) != read_val)
8377 goto out;
8378
8379 /* Test the read/write bits. */
8380 if ((val & write_mask) != write_mask)
8381 goto out;
8382
8383 tw32(offset, save_val);
8384 }
8385
8386 return 0;
8387
8388out:
8389 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8390 tw32(offset, save_val);
8391 return -EIO;
8392}
8393
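/* Helper for the memory self-test: write each test pattern across the
 * given window of on-chip memory via tg3_write_mem(), read it back with
 * tg3_read_mem(), and fail on the first mismatch.
 */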
Michael Chan7942e1d2005-05-29 14:58:36 -07008394static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8395{
Arjan van de Venf71e1302006-03-03 21:33:57 -05008396 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07008397 int i;
8398 u32 j;
8399
8400 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8401 for (j = 0; j < len; j += 4) {
8402 u32 val;
8403
8404 tg3_write_mem(tp, offset + j, test_pattern[i]);
8405 tg3_read_mem(tp, offset + j, &val);
8406 if (val != test_pattern[i])
8407 return -EIO;
8408 }
8409 }
8410 return 0;
8411}
8412
8413static int tg3_test_memory(struct tg3 *tp)
8414{
8415 static struct mem_entry {
8416 u32 offset;
8417 u32 len;
8418 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08008419 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07008420 { 0x00002000, 0x1c000},
8421 { 0xffffffff, 0x00000}
8422 }, mem_tbl_5705[] = {
8423 { 0x00000100, 0x0000c},
8424 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07008425 { 0x00004000, 0x00800},
8426 { 0x00006000, 0x01000},
8427 { 0x00008000, 0x02000},
8428 { 0x00010000, 0x0e000},
8429 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08008430 }, mem_tbl_5755[] = {
8431 { 0x00000200, 0x00008},
8432 { 0x00004000, 0x00800},
8433 { 0x00006000, 0x00800},
8434 { 0x00008000, 0x02000},
8435 { 0x00010000, 0x0c000},
8436 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07008437 };
8438 struct mem_entry *mem_tbl;
8439 int err = 0;
8440 int i;
8441
Michael Chan79f4d132006-03-20 22:28:57 -08008442 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08008443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan79f4d132006-03-20 22:28:57 -08008445 mem_tbl = mem_tbl_5755;
8446 else
8447 mem_tbl = mem_tbl_5705;
8448 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07008449 mem_tbl = mem_tbl_570x;
8450
8451 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8452 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8453 mem_tbl[i].len)) != 0)
8454 break;
8455 }
8456
8457 return err;
8458}
8459
Michael Chan9f40dea2005-09-05 17:53:06 -07008460#define TG3_MAC_LOOPBACK 0
8461#define TG3_PHY_LOOPBACK 1
8462
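/* Run a single MAC- or PHY-level loopback test.  A 1514-byte frame
 * addressed to the NIC itself is built with a counting payload and
 * queued on the send ring, the producer mailbox is written, and host
 * coalescing updates are forced while the status block is polled for
 * the tx consumer and rx producer indices to advance.  The received
 * buffer is then compared byte for byte against what was sent.
 */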
8463static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07008464{
Michael Chan9f40dea2005-09-05 17:53:06 -07008465 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07008466 u32 desc_idx;
8467 struct sk_buff *skb, *rx_skb;
8468 u8 *tx_data;
8469 dma_addr_t map;
8470 int num_pkts, tx_len, rx_len, i, err;
8471 struct tg3_rx_buffer_desc *desc;
8472
Michael Chan9f40dea2005-09-05 17:53:06 -07008473 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008474 /* HW errata - mac loopback fails in some cases on 5780.
8475 * Normal traffic and PHY loopback are not affected by
8476 * errata.
8477 */
8478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8479 return 0;
8480
Michael Chan9f40dea2005-09-05 17:53:06 -07008481 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8482 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8483 MAC_MODE_PORT_MODE_GMII;
8484 tw32(MAC_MODE, mac_mode);
8485 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008486 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8487 BMCR_SPEED1000);
8488 udelay(40);
8489 /* reset to prevent losing 1st rx packet intermittently */
8490 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8491 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8492 udelay(10);
8493 tw32_f(MAC_RX_MODE, tp->rx_mode);
8494 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008495 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8496 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
Michael Chanff18ff02006-03-27 23:17:27 -08008497 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
Michael Chan9f40dea2005-09-05 17:53:06 -07008498 mac_mode &= ~MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -08008499 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8500 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8501 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008502 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -07008503 }
8504 else
8505 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07008506
8507 err = -EIO;
8508
Michael Chanc76949a2005-05-29 14:58:59 -07008509 tx_len = 1514;
8510 skb = dev_alloc_skb(tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -07008511 if (!skb)
8512 return -ENOMEM;
8513
Michael Chanc76949a2005-05-29 14:58:59 -07008514 tx_data = skb_put(skb, tx_len);
8515 memcpy(tx_data, tp->dev->dev_addr, 6);
8516 memset(tx_data + 6, 0x0, 8);
8517
8518 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8519
8520 for (i = 14; i < tx_len; i++)
8521 tx_data[i] = (u8) (i & 0xff);
8522
8523 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8524
8525 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8526 HOSTCC_MODE_NOW);
8527
8528 udelay(10);
8529
8530 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8531
Michael Chanc76949a2005-05-29 14:58:59 -07008532 num_pkts = 0;
8533
Michael Chan9f40dea2005-09-05 17:53:06 -07008534 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07008535
Michael Chan9f40dea2005-09-05 17:53:06 -07008536 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07008537 num_pkts++;
8538
Michael Chan9f40dea2005-09-05 17:53:06 -07008539 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8540 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07008541 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07008542
8543 udelay(10);
8544
8545 for (i = 0; i < 10; i++) {
8546 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8547 HOSTCC_MODE_NOW);
8548
8549 udelay(10);
8550
8551 tx_idx = tp->hw_status->idx[0].tx_consumer;
8552 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07008553 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07008554 (rx_idx == (rx_start_idx + num_pkts)))
8555 break;
8556 }
8557
8558 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8559 dev_kfree_skb(skb);
8560
Michael Chan9f40dea2005-09-05 17:53:06 -07008561 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07008562 goto out;
8563
8564 if (rx_idx != rx_start_idx + num_pkts)
8565 goto out;
8566
8567 desc = &tp->rx_rcb[rx_start_idx];
8568 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8569 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8570 if (opaque_key != RXD_OPAQUE_RING_STD)
8571 goto out;
8572
8573 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8574 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8575 goto out;
8576
8577 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8578 if (rx_len != tx_len)
8579 goto out;
8580
8581 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8582
8583 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8584 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8585
8586 for (i = 14; i < tx_len; i++) {
8587 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8588 goto out;
8589 }
8590 err = 0;
8591
8592 /* tg3_free_rings will unmap and free the rx_skb */
8593out:
8594 return err;
8595}
8596
Michael Chan9f40dea2005-09-05 17:53:06 -07008597#define TG3_MAC_LOOPBACK_FAILED 1
8598#define TG3_PHY_LOOPBACK_FAILED 2
8599#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8600 TG3_PHY_LOOPBACK_FAILED)
8601
8602static int tg3_test_loopback(struct tg3 *tp)
8603{
8604 int err = 0;
8605
8606 if (!netif_running(tp->dev))
8607 return TG3_LOOPBACK_FAILED;
8608
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008609 tg3_reset_hw(tp, 1);
Michael Chan9f40dea2005-09-05 17:53:06 -07008610
8611 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8612 err |= TG3_MAC_LOOPBACK_FAILED;
8613 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8614 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8615 err |= TG3_PHY_LOOPBACK_FAILED;
8616 }
8617
8618 return err;
8619}
8620
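/* "ethtool -t" entry point.  data[0..5] report the nvram, link,
 * register, memory, loopback and interrupt test results respectively.
 * The offline tests halt the chip (and reset the PHY on MII SerDes
 * parts) before running, then reinitialize the hardware and restart
 * the interface when done.
 */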
Michael Chan4cafd3f2005-05-29 14:56:34 -07008621static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8622 u64 *data)
8623{
Michael Chan566f86a2005-05-29 14:56:58 -07008624 struct tg3 *tp = netdev_priv(dev);
8625
Michael Chanbc1c7562006-03-20 17:48:03 -08008626 if (tp->link_config.phy_is_low_power)
8627 tg3_set_power_state(tp, PCI_D0);
8628
Michael Chan566f86a2005-05-29 14:56:58 -07008629 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8630
8631 if (tg3_test_nvram(tp) != 0) {
8632 etest->flags |= ETH_TEST_FL_FAILED;
8633 data[0] = 1;
8634 }
Michael Chanca430072005-05-29 14:57:23 -07008635 if (tg3_test_link(tp) != 0) {
8636 etest->flags |= ETH_TEST_FL_FAILED;
8637 data[1] = 1;
8638 }
Michael Chana71116d2005-05-29 14:58:11 -07008639 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanec41c7d2006-01-17 02:40:55 -08008640 int err, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07008641
Michael Chanbbe832c2005-06-24 20:20:04 -07008642 if (netif_running(dev)) {
8643 tg3_netif_stop(tp);
8644 irq_sync = 1;
8645 }
8646
8647 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07008648
8649 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -08008650 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008651 tg3_halt_cpu(tp, RX_CPU_BASE);
8652 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8653 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08008654 if (!err)
8655 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008656
Michael Chand9ab5ad2006-03-20 22:27:35 -08008657 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8658 tg3_phy_reset(tp);
8659
Michael Chana71116d2005-05-29 14:58:11 -07008660 if (tg3_test_registers(tp) != 0) {
8661 etest->flags |= ETH_TEST_FL_FAILED;
8662 data[2] = 1;
8663 }
Michael Chan7942e1d2005-05-29 14:58:36 -07008664 if (tg3_test_memory(tp) != 0) {
8665 etest->flags |= ETH_TEST_FL_FAILED;
8666 data[3] = 1;
8667 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008668 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07008669 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07008670
David S. Millerf47c11e2005-06-24 20:18:35 -07008671 tg3_full_unlock(tp);
8672
Michael Chand4bc3922005-05-29 14:59:20 -07008673 if (tg3_test_interrupt(tp) != 0) {
8674 etest->flags |= ETH_TEST_FL_FAILED;
8675 data[5] = 1;
8676 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008677
8678 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07008679
Michael Chana71116d2005-05-29 14:58:11 -07008680 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8681 if (netif_running(dev)) {
8682 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008683 tg3_init_hw(tp, 1);
Michael Chana71116d2005-05-29 14:58:11 -07008684 tg3_netif_start(tp);
8685 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008686
8687 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008688 }
Michael Chanbc1c7562006-03-20 17:48:03 -08008689 if (tp->link_config.phy_is_low_power)
8690 tg3_set_power_state(tp, PCI_D3hot);
8691
Michael Chan4cafd3f2005-05-29 14:56:34 -07008692}
8693
Linus Torvalds1da177e2005-04-16 15:20:36 -07008694static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8695{
8696 struct mii_ioctl_data *data = if_mii(ifr);
8697 struct tg3 *tp = netdev_priv(dev);
8698 int err;
8699
8700 switch(cmd) {
8701 case SIOCGMIIPHY:
8702 data->phy_id = PHY_ADDR;
8703
8704 /* fallthru */
8705 case SIOCGMIIREG: {
8706 u32 mii_regval;
8707
8708 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8709 break; /* We have no PHY */
8710
Michael Chanbc1c7562006-03-20 17:48:03 -08008711 if (tp->link_config.phy_is_low_power)
8712 return -EAGAIN;
8713
David S. Millerf47c11e2005-06-24 20:18:35 -07008714 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008715 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07008716 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008717
8718 data->val_out = mii_regval;
8719
8720 return err;
8721 }
8722
8723 case SIOCSMIIREG:
8724 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8725 break; /* We have no PHY */
8726
8727 if (!capable(CAP_NET_ADMIN))
8728 return -EPERM;
8729
Michael Chanbc1c7562006-03-20 17:48:03 -08008730 if (tp->link_config.phy_is_low_power)
8731 return -EAGAIN;
8732
David S. Millerf47c11e2005-06-24 20:18:35 -07008733 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008734 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07008735 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008736
8737 return err;
8738
8739 default:
8740 /* do nothing */
8741 break;
8742 }
8743 return -EOPNOTSUPP;
8744}
8745
8746#if TG3_VLAN_TAG_USED
8747static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8748{
8749 struct tg3 *tp = netdev_priv(dev);
8750
David S. Millerf47c11e2005-06-24 20:18:35 -07008751 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008752
8753 tp->vlgrp = grp;
8754
8755 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8756 __tg3_set_rx_mode(dev);
8757
David S. Millerf47c11e2005-06-24 20:18:35 -07008758 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008759}
8760
8761static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8762{
8763 struct tg3 *tp = netdev_priv(dev);
8764
David S. Millerf47c11e2005-06-24 20:18:35 -07008765 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008766 if (tp->vlgrp)
8767 tp->vlgrp->vlan_devices[vid] = NULL;
David S. Millerf47c11e2005-06-24 20:18:35 -07008768 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008769}
8770#endif
8771
David S. Miller15f98502005-05-18 22:49:26 -07008772static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8773{
8774 struct tg3 *tp = netdev_priv(dev);
8775
8776 memcpy(ec, &tp->coal, sizeof(*ec));
8777 return 0;
8778}
8779
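/* "ethtool -C": update interrupt coalescing.  Each parameter is
 * bounds-checked against the chip limits; the *_irq and stats-block
 * variants are not supported on 5705-class and newer chips, so their
 * maxima stay zero there and any non-zero request is rejected.  At
 * least one rx and one tx trigger must be non-zero.  Only the relevant
 * fields are copied, and they are pushed to the hardware if the
 * interface is running.
 */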
Michael Chand244c892005-07-05 14:42:33 -07008780static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8781{
8782 struct tg3 *tp = netdev_priv(dev);
8783 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8784 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8785
8786 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8787 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8788 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8789 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8790 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8791 }
8792
8793 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8794 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8795 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8796 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8797 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8798 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8799 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8800 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8801 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8802 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8803 return -EINVAL;
8804
8805 /* No rx interrupts will be generated if both are zero */
8806 if ((ec->rx_coalesce_usecs == 0) &&
8807 (ec->rx_max_coalesced_frames == 0))
8808 return -EINVAL;
8809
8810 /* No tx interrupts will be generated if both are zero */
8811 if ((ec->tx_coalesce_usecs == 0) &&
8812 (ec->tx_max_coalesced_frames == 0))
8813 return -EINVAL;
8814
8815 /* Only copy relevant parameters, ignore all others. */
8816 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8817 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8818 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8819 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8820 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8821 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8822 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8823 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8824 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8825
8826 if (netif_running(dev)) {
8827 tg3_full_lock(tp, 0);
8828 __tg3_set_coalesce(tp, &tp->coal);
8829 tg3_full_unlock(tp);
8830 }
8831 return 0;
8832}
8833
Linus Torvalds1da177e2005-04-16 15:20:36 -07008834static struct ethtool_ops tg3_ethtool_ops = {
8835 .get_settings = tg3_get_settings,
8836 .set_settings = tg3_set_settings,
8837 .get_drvinfo = tg3_get_drvinfo,
8838 .get_regs_len = tg3_get_regs_len,
8839 .get_regs = tg3_get_regs,
8840 .get_wol = tg3_get_wol,
8841 .set_wol = tg3_set_wol,
8842 .get_msglevel = tg3_get_msglevel,
8843 .set_msglevel = tg3_set_msglevel,
8844 .nway_reset = tg3_nway_reset,
8845 .get_link = ethtool_op_get_link,
8846 .get_eeprom_len = tg3_get_eeprom_len,
8847 .get_eeprom = tg3_get_eeprom,
8848 .set_eeprom = tg3_set_eeprom,
8849 .get_ringparam = tg3_get_ringparam,
8850 .set_ringparam = tg3_set_ringparam,
8851 .get_pauseparam = tg3_get_pauseparam,
8852 .set_pauseparam = tg3_set_pauseparam,
8853 .get_rx_csum = tg3_get_rx_csum,
8854 .set_rx_csum = tg3_set_rx_csum,
8855 .get_tx_csum = ethtool_op_get_tx_csum,
8856 .set_tx_csum = tg3_set_tx_csum,
8857 .get_sg = ethtool_op_get_sg,
8858 .set_sg = ethtool_op_set_sg,
8859#if TG3_TSO_SUPPORT != 0
8860 .get_tso = ethtool_op_get_tso,
8861 .set_tso = tg3_set_tso,
8862#endif
Michael Chan4cafd3f2005-05-29 14:56:34 -07008863 .self_test_count = tg3_get_test_count,
8864 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008865 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07008866 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008867 .get_stats_count = tg3_get_stats_count,
8868 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07008869 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07008870 .set_coalesce = tg3_set_coalesce,
John W. Linville2ff43692005-09-12 14:44:20 -07008871 .get_perm_addr = ethtool_op_get_perm_addr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008872};
8873
8874static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8875{
Michael Chan1b277772006-03-20 22:27:48 -08008876 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008877
8878 tp->nvram_size = EEPROM_CHIP_SIZE;
8879
Michael Chan18201802006-03-20 22:29:15 -08008880 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008881 return;
8882
Michael Chan1b277772006-03-20 22:27:48 -08008883 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008884 return;
8885
8886 /*
8887 * Size the chip by reading offsets at increasing powers of two.
8888 * When we encounter our validation signature, we know the addressing
8889 * has wrapped around, and thus have our chip size.
8890 */
Michael Chan1b277772006-03-20 22:27:48 -08008891 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008892
8893 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -08008894 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008895 return;
8896
Michael Chan18201802006-03-20 22:29:15 -08008897 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008898 break;
8899
8900 cursize <<= 1;
8901 }
8902
8903 tp->nvram_size = cursize;
8904}
8905
8906static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8907{
8908 u32 val;
8909
Michael Chan18201802006-03-20 22:29:15 -08008910 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008911 return;
8912
8913 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -08008914 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08008915 tg3_get_eeprom_size(tp);
8916 return;
8917 }
8918
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8920 if (val != 0) {
8921 tp->nvram_size = (val >> 16) * 1024;
8922 return;
8923 }
8924 }
8925 tp->nvram_size = 0x20000;
8926}
8927
8928static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8929{
8930 u32 nvcfg1;
8931
8932 nvcfg1 = tr32(NVRAM_CFG1);
8933 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8934 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8935 }
8936 else {
8937 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8938 tw32(NVRAM_CFG1, nvcfg1);
8939 }
8940
Michael Chan4c987482005-09-05 17:52:38 -07008941 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -07008942 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008943 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8944 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8945 tp->nvram_jedecnum = JEDEC_ATMEL;
8946 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8947 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8948 break;
8949 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8950 tp->nvram_jedecnum = JEDEC_ATMEL;
8951 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8952 break;
8953 case FLASH_VENDOR_ATMEL_EEPROM:
8954 tp->nvram_jedecnum = JEDEC_ATMEL;
8955 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8956 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8957 break;
8958 case FLASH_VENDOR_ST:
8959 tp->nvram_jedecnum = JEDEC_ST;
8960 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8961 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8962 break;
8963 case FLASH_VENDOR_SAIFUN:
8964 tp->nvram_jedecnum = JEDEC_SAIFUN;
8965 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8966 break;
8967 case FLASH_VENDOR_SST_SMALL:
8968 case FLASH_VENDOR_SST_LARGE:
8969 tp->nvram_jedecnum = JEDEC_SST;
8970 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8971 break;
8972 }
8973 }
8974 else {
8975 tp->nvram_jedecnum = JEDEC_ATMEL;
8976 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8977 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8978 }
8979}
8980
Michael Chan361b4ac2005-04-21 17:11:21 -07008981static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8982{
8983 u32 nvcfg1;
8984
8985 nvcfg1 = tr32(NVRAM_CFG1);
8986
Michael Chane6af3012005-04-21 17:12:05 -07008987 /* NVRAM protection for TPM */
8988 if (nvcfg1 & (1 << 27))
8989 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8990
Michael Chan361b4ac2005-04-21 17:11:21 -07008991 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8992 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8993 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8994 tp->nvram_jedecnum = JEDEC_ATMEL;
8995 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8996 break;
8997 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8998 tp->nvram_jedecnum = JEDEC_ATMEL;
8999 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9000 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9001 break;
9002 case FLASH_5752VENDOR_ST_M45PE10:
9003 case FLASH_5752VENDOR_ST_M45PE20:
9004 case FLASH_5752VENDOR_ST_M45PE40:
9005 tp->nvram_jedecnum = JEDEC_ST;
9006 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9007 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9008 break;
9009 }
9010
9011 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9012 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9013 case FLASH_5752PAGE_SIZE_256:
9014 tp->nvram_pagesize = 256;
9015 break;
9016 case FLASH_5752PAGE_SIZE_512:
9017 tp->nvram_pagesize = 512;
9018 break;
9019 case FLASH_5752PAGE_SIZE_1K:
9020 tp->nvram_pagesize = 1024;
9021 break;
9022 case FLASH_5752PAGE_SIZE_2K:
9023 tp->nvram_pagesize = 2048;
9024 break;
9025 case FLASH_5752PAGE_SIZE_4K:
9026 tp->nvram_pagesize = 4096;
9027 break;
9028 case FLASH_5752PAGE_SIZE_264:
9029 tp->nvram_pagesize = 264;
9030 break;
9031 }
9032 }
9033 else {
9034 /* For eeprom, set pagesize to maximum eeprom size */
9035 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9036
9037 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9038 tw32(NVRAM_CFG1, nvcfg1);
9039 }
9040}
9041
Michael Chand3c7b882006-03-23 01:28:25 -08009042static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9043{
9044 u32 nvcfg1;
9045
9046 nvcfg1 = tr32(NVRAM_CFG1);
9047
9048 /* NVRAM protection for TPM */
9049 if (nvcfg1 & (1 << 27))
9050 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9051
9052 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9053 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9054 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9055 tp->nvram_jedecnum = JEDEC_ATMEL;
9056 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9057 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9058
9059 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9060 tw32(NVRAM_CFG1, nvcfg1);
9061 break;
9062 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9063 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9064 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9065 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9066 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9067 tp->nvram_jedecnum = JEDEC_ATMEL;
9068 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9069 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9070 tp->nvram_pagesize = 264;
9071 break;
9072 case FLASH_5752VENDOR_ST_M45PE10:
9073 case FLASH_5752VENDOR_ST_M45PE20:
9074 case FLASH_5752VENDOR_ST_M45PE40:
9075 tp->nvram_jedecnum = JEDEC_ST;
9076 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9077 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9078 tp->nvram_pagesize = 256;
9079 break;
9080 }
9081}
9082
Michael Chan1b277772006-03-20 22:27:48 -08009083static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9084{
9085 u32 nvcfg1;
9086
9087 nvcfg1 = tr32(NVRAM_CFG1);
9088
9089 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9090 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9091 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9092 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9093 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9094 tp->nvram_jedecnum = JEDEC_ATMEL;
9095 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9096 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9097
9098 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9099 tw32(NVRAM_CFG1, nvcfg1);
9100 break;
9101 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9102 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9103 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9104 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9105 tp->nvram_jedecnum = JEDEC_ATMEL;
9106 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9107 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9108 tp->nvram_pagesize = 264;
9109 break;
9110 case FLASH_5752VENDOR_ST_M45PE10:
9111 case FLASH_5752VENDOR_ST_M45PE20:
9112 case FLASH_5752VENDOR_ST_M45PE40:
9113 tp->nvram_jedecnum = JEDEC_ST;
9114 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9115 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9116 tp->nvram_pagesize = 256;
9117 break;
9118 }
9119}
9120
Linus Torvalds1da177e2005-04-16 15:20:36 -07009121/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9122static void __devinit tg3_nvram_init(struct tg3 *tp)
9123{
9124 int j;
9125
Linus Torvalds1da177e2005-04-16 15:20:36 -07009126 tw32_f(GRC_EEPROM_ADDR,
9127 (EEPROM_ADDR_FSM_RESET |
9128 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9129 EEPROM_ADDR_CLKPERD_SHIFT)));
9130
9131 /* XXX schedule_timeout() ... */
9132 for (j = 0; j < 100; j++)
9133 udelay(10);
9134
9135 /* Enable seeprom accesses. */
9136 tw32_f(GRC_LOCAL_CTRL,
9137 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9138 udelay(100);
9139
9140 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9141 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9142 tp->tg3_flags |= TG3_FLAG_NVRAM;
9143
Michael Chanec41c7d2006-01-17 02:40:55 -08009144 if (tg3_nvram_lock(tp)) {
9145			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9146 "tg3_nvram_init failed.\n", tp->dev->name);
9147 return;
9148 }
Michael Chane6af3012005-04-21 17:12:05 -07009149 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009150
Michael Chan361b4ac2005-04-21 17:11:21 -07009151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9152 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -08009153 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9154 tg3_get_5755_nvram_info(tp);
Michael Chan1b277772006-03-20 22:27:48 -08009155 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9156 tg3_get_5787_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -07009157 else
9158 tg3_get_nvram_info(tp);
9159
Linus Torvalds1da177e2005-04-16 15:20:36 -07009160 tg3_get_nvram_size(tp);
9161
Michael Chane6af3012005-04-21 17:12:05 -07009162 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -08009163 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009164
9165 } else {
9166 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9167
9168 tg3_get_eeprom_size(tp);
9169 }
9170}
9171
9172static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9173 u32 offset, u32 *val)
9174{
9175 u32 tmp;
9176 int i;
9177
9178 if (offset > EEPROM_ADDR_ADDR_MASK ||
9179 (offset % 4) != 0)
9180 return -EINVAL;
9181
9182 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9183 EEPROM_ADDR_DEVID_MASK |
9184 EEPROM_ADDR_READ);
9185 tw32(GRC_EEPROM_ADDR,
9186 tmp |
9187 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9188 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9189 EEPROM_ADDR_ADDR_MASK) |
9190 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9191
9192 for (i = 0; i < 10000; i++) {
9193 tmp = tr32(GRC_EEPROM_ADDR);
9194
9195 if (tmp & EEPROM_ADDR_COMPLETE)
9196 break;
9197 udelay(100);
9198 }
9199 if (!(tmp & EEPROM_ADDR_COMPLETE))
9200 return -EBUSY;
9201
9202 *val = tr32(GRC_EEPROM_DATA);
9203 return 0;
9204}
9205
9206#define NVRAM_CMD_TIMEOUT 10000
9207
9208static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9209{
9210 int i;
9211
9212 tw32(NVRAM_CMD, nvram_cmd);
9213 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9214 udelay(10);
9215 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9216 udelay(10);
9217 break;
9218 }
9219 }
9220 if (i == NVRAM_CMD_TIMEOUT) {
9221 return -EBUSY;
9222 }
9223 return 0;
9224}
9225
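/* Atmel AT45DB buffered flash parts are addressed by page number plus
 * offset within the (264-byte) page rather than linearly, so NVRAM
 * offsets are translated into the device's page format here and back
 * again in tg3_nvram_logical_addr() below.
 */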
Michael Chan18201802006-03-20 22:29:15 -08009226static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9227{
9228 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9229 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9230 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9231 (tp->nvram_jedecnum == JEDEC_ATMEL))
9232
9233 addr = ((addr / tp->nvram_pagesize) <<
9234 ATMEL_AT45DB0X1B_PAGE_POS) +
9235 (addr % tp->nvram_pagesize);
9236
9237 return addr;
9238}
9239
Michael Chanc4e65752006-03-20 22:29:32 -08009240static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9241{
9242 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9243 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9244 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9245 (tp->nvram_jedecnum == JEDEC_ATMEL))
9246
9247 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9248 tp->nvram_pagesize) +
9249 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9250
9251 return addr;
9252}
9253
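/* Read one 32-bit word of NVRAM.  Parts without a NVRAM interface go
 * through the slow GRC EEPROM state machine; otherwise the NVRAM lock
 * is taken, the offset is translated to the device addressing format,
 * a read command is executed, and the byte-swapped data register is
 * returned.
 */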
Linus Torvalds1da177e2005-04-16 15:20:36 -07009254static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9255{
9256 int ret;
9257
Linus Torvalds1da177e2005-04-16 15:20:36 -07009258 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9259 return tg3_nvram_read_using_eeprom(tp, offset, val);
9260
Michael Chan18201802006-03-20 22:29:15 -08009261 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009262
9263 if (offset > NVRAM_ADDR_MSK)
9264 return -EINVAL;
9265
Michael Chanec41c7d2006-01-17 02:40:55 -08009266 ret = tg3_nvram_lock(tp);
9267 if (ret)
9268 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009269
Michael Chane6af3012005-04-21 17:12:05 -07009270 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271
9272 tw32(NVRAM_ADDR, offset);
9273 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9274 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9275
9276 if (ret == 0)
9277 *val = swab32(tr32(NVRAM_RDDATA));
9278
Michael Chane6af3012005-04-21 17:12:05 -07009279 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009280
Michael Chan381291b2005-12-13 21:08:21 -08009281 tg3_nvram_unlock(tp);
9282
Linus Torvalds1da177e2005-04-16 15:20:36 -07009283 return ret;
9284}
9285
Michael Chan18201802006-03-20 22:29:15 -08009286static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9287{
9288 int err;
9289 u32 tmp;
9290
9291 err = tg3_nvram_read(tp, offset, &tmp);
9292 *val = swab32(tmp);
9293 return err;
9294}
9295
Linus Torvalds1da177e2005-04-16 15:20:36 -07009296static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9297 u32 offset, u32 len, u8 *buf)
9298{
9299 int i, j, rc = 0;
9300 u32 val;
9301
9302 for (i = 0; i < len; i += 4) {
9303 u32 addr, data;
9304
9305 addr = offset + i;
9306
9307 memcpy(&data, buf + i, 4);
9308
9309 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9310
9311 val = tr32(GRC_EEPROM_ADDR);
9312 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9313
9314 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9315 EEPROM_ADDR_READ);
9316 tw32(GRC_EEPROM_ADDR, val |
9317 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9318 (addr & EEPROM_ADDR_ADDR_MASK) |
9319 EEPROM_ADDR_START |
9320 EEPROM_ADDR_WRITE);
9321
9322 for (j = 0; j < 10000; j++) {
9323 val = tr32(GRC_EEPROM_ADDR);
9324
9325 if (val & EEPROM_ADDR_COMPLETE)
9326 break;
9327 udelay(100);
9328 }
9329 if (!(val & EEPROM_ADDR_COMPLETE)) {
9330 rc = -EBUSY;
9331 break;
9332 }
9333 }
9334
9335 return rc;
9336}
9337
9338/* offset and length are dword aligned */
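/* Unbuffered flash can only be programmed a page at a time, so each
 * affected page is read back, merged with the new data, erased, and then
 * rewritten word by word.
 */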
9339static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9340 u8 *buf)
9341{
9342 int ret = 0;
9343 u32 pagesize = tp->nvram_pagesize;
9344 u32 pagemask = pagesize - 1;
9345 u32 nvram_cmd;
9346 u8 *tmp;
9347
9348 tmp = kmalloc(pagesize, GFP_KERNEL);
9349 if (tmp == NULL)
9350 return -ENOMEM;
9351
9352 while (len) {
9353 int j;
Michael Chane6af3012005-04-21 17:12:05 -07009354 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009355
9356 phy_addr = offset & ~pagemask;
9357
9358 for (j = 0; j < pagesize; j += 4) {
9359 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9360 (u32 *) (tmp + j))))
9361 break;
9362 }
9363 if (ret)
9364 break;
9365
9366 page_off = offset & pagemask;
9367 size = pagesize;
9368 if (len < size)
9369 size = len;
9370
9371 len -= size;
9372
9373 memcpy(tmp + page_off, buf, size);
9374
9375 offset = offset + (pagesize - page_off);
9376
Michael Chane6af3012005-04-21 17:12:05 -07009377 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009378
9379 /*
9380 * Before we can erase the flash page, we need
9381 * to issue a special "write enable" command.
9382 */
9383 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9384
9385 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9386 break;
9387
9388 /* Erase the target page */
9389 tw32(NVRAM_ADDR, phy_addr);
9390
9391 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9392 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9393
9394 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9395 break;
9396
9397 /* Issue another write enable to start the write. */
9398 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9399
9400 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9401 break;
9402
9403 for (j = 0; j < pagesize; j += 4) {
9404 u32 data;
9405
9406 data = *((u32 *) (tmp + j));
9407 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9408
9409 tw32(NVRAM_ADDR, phy_addr + j);
9410
9411 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9412 NVRAM_CMD_WR;
9413
9414 if (j == 0)
9415 nvram_cmd |= NVRAM_CMD_FIRST;
9416 else if (j == (pagesize - 4))
9417 nvram_cmd |= NVRAM_CMD_LAST;
9418
9419 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9420 break;
9421 }
9422 if (ret)
9423 break;
9424 }
9425
9426 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9427 tg3_nvram_exec_cmd(tp, nvram_cmd);
9428
9429 kfree(tmp);
9430
9431 return ret;
9432}
9433
9434/* offset and length are dword aligned */
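/* Buffered flash and EEPROM parts accept individual word writes, so no
 * separate page-erase cycle is issued here.
 */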
9435static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9436 u8 *buf)
9437{
9438 int i, ret = 0;
9439
9440 for (i = 0; i < len; i += 4, offset += 4) {
9441 u32 data, page_off, phy_addr, nvram_cmd;
9442
9443 memcpy(&data, buf + i, 4);
9444 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9445
9446 page_off = offset % tp->nvram_pagesize;
9447
Michael Chan18201802006-03-20 22:29:15 -08009448 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009449
9450 tw32(NVRAM_ADDR, phy_addr);
9451
9452 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9453
9454 if ((page_off == 0) || (i == 0))
9455 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -07009456 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009457 nvram_cmd |= NVRAM_CMD_LAST;
9458
9459 if (i == (len - 4))
9460 nvram_cmd |= NVRAM_CMD_LAST;
9461
Michael Chan4c987482005-09-05 17:52:38 -07009462 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -08009463 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -08009464 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Michael Chan4c987482005-09-05 17:52:38 -07009465 (tp->nvram_jedecnum == JEDEC_ST) &&
9466 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009467
9468 if ((ret = tg3_nvram_exec_cmd(tp,
9469 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9470 NVRAM_CMD_DONE)))
9471
9472 break;
9473 }
9474 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9475 /* We always do complete word writes to eeprom. */
9476 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9477 }
9478
9479 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9480 break;
9481 }
9482 return ret;
9483}
9484
9485/* offset and length are dword aligned */
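/* Top-level NVRAM write: temporarily drop the GPIO-based write protect if
 * it is active, dispatch to the EEPROM, buffered, or unbuffered path, and
 * then restore the original protection state.
 */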
9486static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9487{
9488 int ret;
9489
Linus Torvalds1da177e2005-04-16 15:20:36 -07009490 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07009491 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9492 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009493 udelay(40);
9494 }
9495
9496 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9497 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9498 }
9499 else {
9500 u32 grc_mode;
9501
Michael Chanec41c7d2006-01-17 02:40:55 -08009502 ret = tg3_nvram_lock(tp);
9503 if (ret)
9504 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009505
Michael Chane6af3012005-04-21 17:12:05 -07009506 tg3_enable_nvram_access(tp);
9507 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9508 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009509 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009510
9511 grc_mode = tr32(GRC_MODE);
9512 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9513
9514 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9515 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9516
9517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9518 buf);
9519 }
9520 else {
9521 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9522 buf);
9523 }
9524
9525 grc_mode = tr32(GRC_MODE);
9526 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9527
Michael Chane6af3012005-04-21 17:12:05 -07009528 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009529 tg3_nvram_unlock(tp);
9530 }
9531
9532 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07009533 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009534 udelay(40);
9535 }
9536
9537 return ret;
9538}
9539
9540struct subsys_tbl_ent {
9541 u16 subsys_vendor, subsys_devid;
9542 u32 phy_id;
9543};
9544
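/* Map PCI subsystem vendor/device IDs to PHY IDs for boards whose NVRAM
 * does not carry a usable PHY ID.
 */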
9545static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9546 /* Broadcom boards. */
9547 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9548 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9549 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9550 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9551 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9552 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9553 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9554 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9555 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9556 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9557 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9558
9559 /* 3com boards. */
9560 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9561 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9562 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9563 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9564 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9565
9566 /* DELL boards. */
9567 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9568 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9569 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9570 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9571
9572 /* Compaq boards. */
9573 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9574 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9575 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9576 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9577 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9578
9579 /* IBM boards. */
9580 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9581};
9582
9583static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9584{
9585 int i;
9586
9587 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9588 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9589 tp->pdev->subsystem_vendor) &&
9590 (subsys_id_to_phy_id[i].subsys_devid ==
9591 tp->pdev->subsystem_device))
9592 return &subsys_id_to_phy_id[i];
9593 }
9594 return NULL;
9595}
9596
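/* Pull the configuration that bootcode leaves in NIC SRAM (PHY type, LED
 * mode, write-protect, WoL and ASF flags) so later probe code can rely
 * on it.
 */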
Michael Chan7d0c41e2005-04-21 17:06:20 -07009597static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009598{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009599 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -08009600 u16 pmcsr;
9601
9602 /* On some early chips the SRAM cannot be accessed in D3hot state,
9603	 * so we need to make sure we're in D0.
9604 */
9605 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9606 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9607 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9608 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -07009609
9610 /* Make sure register accesses (indirect or otherwise)
9611 * will function correctly.
9612 */
9613 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9614 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009615
David S. Millerf49639e2006-06-09 11:58:36 -07009616 /* The memory arbiter has to be enabled in order for SRAM accesses
9617 * to succeed. Normally on powerup the tg3 chip firmware will make
9618 * sure it is enabled, but other entities such as system netboot
9619 * code might disable it.
9620 */
9621 val = tr32(MEMARB_MODE);
9622 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9623
Linus Torvalds1da177e2005-04-16 15:20:36 -07009624 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009625 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9626
David S. Millerf49639e2006-06-09 11:58:36 -07009627 /* Assume an onboard device by default. */
9628 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
David S. Miller72b845e2006-03-14 14:11:48 -08009629
Linus Torvalds1da177e2005-04-16 15:20:36 -07009630 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9631 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9632 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009633 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9634 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009635
9636 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9637 tp->nic_sram_data_cfg = nic_cfg;
9638
9639 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9640 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9641 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9642 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9643 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9644 (ver > 0) && (ver < 0x100))
9645 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9646
Linus Torvalds1da177e2005-04-16 15:20:36 -07009647 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9648 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9649 eeprom_phy_serdes = 1;
9650
9651 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9652 if (nic_phy_id != 0) {
9653 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9654 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9655
9656 eeprom_phy_id = (id1 >> 16) << 10;
9657 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9658 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9659 } else
9660 eeprom_phy_id = 0;
9661
Michael Chan7d0c41e2005-04-21 17:06:20 -07009662 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -07009663 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -07009664 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -07009665 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9666 else
9667 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9668 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009669
John W. Linvillecbf46852005-04-21 17:01:29 -07009670 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009671 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9672 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -07009673 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07009674 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9675
9676 switch (led_cfg) {
9677 default:
9678 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9679 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9680 break;
9681
9682 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9683 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9684 break;
9685
9686 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9687 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -07009688
9689 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9690			 * read from some older 5700/5701 bootcode.
9691 */
9692 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9693 ASIC_REV_5700 ||
9694 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9695 ASIC_REV_5701)
9696 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9697
Linus Torvalds1da177e2005-04-16 15:20:36 -07009698 break;
9699
9700 case SHASTA_EXT_LED_SHARED:
9701 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9702 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9703 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9704 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9705 LED_CTRL_MODE_PHY_2);
9706 break;
9707
9708 case SHASTA_EXT_LED_MAC:
9709 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9710 break;
9711
9712 case SHASTA_EXT_LED_COMBO:
9713 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9714 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9715 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9716 LED_CTRL_MODE_PHY_2);
9717 break;
9718
9719		}
9720
9721 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9723 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9724 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9725
Michael Chanbbadf502006-04-06 21:46:34 -07009726 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009727 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
David S. Millerf49639e2006-06-09 11:58:36 -07009728 else
9729 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009730
9731 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9732 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07009733 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009734 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9735 }
9736 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9737 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9738
9739 if (cfg2 & (1 << 17))
9740 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9741
9742		/* SerDes signal pre-emphasis in register 0x590 is set by the
9743		 * bootcode if bit 18 is set. */
9744 if (cfg2 & (1 << 18))
9745 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9746 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009747}
9748
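/* Identify the PHY: prefer the ID read over MII, fall back to the value
 * found in NVRAM or to the subsystem-ID table, then perform the initial
 * copper PHY setup and advertisement.
 */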
9749static int __devinit tg3_phy_probe(struct tg3 *tp)
9750{
9751 u32 hw_phy_id_1, hw_phy_id_2;
9752 u32 hw_phy_id, hw_phy_id_masked;
9753 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009754
9755 /* Reading the PHY ID register can conflict with ASF
9756	 * firmware access to the PHY hardware.
9757 */
9758 err = 0;
9759 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9760 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9761 } else {
9762 /* Now read the physical PHY_ID from the chip and verify
9763 * that it is sane. If it doesn't look good, we fall back
9764		 * to either the hard-coded table based PHY_ID or, failing
9765		 * that, the value found in the eeprom area.
9766 */
9767 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9768 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9769
9770 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9771 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9772 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9773
9774 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9775 }
9776
9777 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9778 tp->phy_id = hw_phy_id;
9779 if (hw_phy_id_masked == PHY_ID_BCM8002)
9780 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -07009781 else
9782 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009783 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -07009784 if (tp->phy_id != PHY_ID_INVALID) {
9785 /* Do nothing, phy ID already set up in
9786 * tg3_get_eeprom_hw_cfg().
9787 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009788 } else {
9789 struct subsys_tbl_ent *p;
9790
9791 /* No eeprom signature? Try the hardcoded
9792 * subsys device table.
9793 */
9794 p = lookup_by_subsys(tp);
9795 if (!p)
9796 return -ENODEV;
9797
9798 tp->phy_id = p->phy_id;
9799 if (!tp->phy_id ||
9800 tp->phy_id == PHY_ID_BCM8002)
9801 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9802 }
9803 }
9804
Michael Chan747e8f82005-07-25 12:33:22 -07009805 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009806 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9807 u32 bmsr, adv_reg, tg3_ctrl;
9808
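		/* BMSR link status is latched, so read it twice and act on
		 * the second, current value.
		 */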
9809 tg3_readphy(tp, MII_BMSR, &bmsr);
9810 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9811 (bmsr & BMSR_LSTATUS))
9812 goto skip_phy_reset;
9813
9814 err = tg3_phy_reset(tp);
9815 if (err)
9816 return err;
9817
9818 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9819 ADVERTISE_100HALF | ADVERTISE_100FULL |
9820 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9821 tg3_ctrl = 0;
9822 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9823 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9824 MII_TG3_CTRL_ADV_1000_FULL);
9825 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9826 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9827 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9828 MII_TG3_CTRL_ENABLE_AS_MASTER);
9829 }
9830
9831 if (!tg3_copper_is_advertising_all(tp)) {
9832 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9833
9834 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9835 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9836
9837 tg3_writephy(tp, MII_BMCR,
9838 BMCR_ANENABLE | BMCR_ANRESTART);
9839 }
9840 tg3_phy_set_wirespeed(tp);
9841
9842 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9843 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9844 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9845 }
9846
9847skip_phy_reset:
9848 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9849 err = tg3_init_5401phy_dsp(tp);
9850 if (err)
9851 return err;
9852 }
9853
9854 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9855 err = tg3_init_5401phy_dsp(tp);
9856 }
9857
Michael Chan747e8f82005-07-25 12:33:22 -07009858 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009859 tp->link_config.advertising =
9860 (ADVERTISED_1000baseT_Half |
9861 ADVERTISED_1000baseT_Full |
9862 ADVERTISED_Autoneg |
9863 ADVERTISED_FIBRE);
9864 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9865 tp->link_config.advertising &=
9866 ~(ADVERTISED_1000baseT_Half |
9867 ADVERTISED_1000baseT_Full);
9868
9869 return err;
9870}
9871
9872static void __devinit tg3_read_partno(struct tg3 *tp)
9873{
9874 unsigned char vpd_data[256];
9875 int i;
Michael Chan1b277772006-03-20 22:27:48 -08009876 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009877
Michael Chan18201802006-03-20 22:29:15 -08009878 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -07009879 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009880
Michael Chan18201802006-03-20 22:29:15 -08009881 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08009882 for (i = 0; i < 256; i += 4) {
9883 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009884
Michael Chan1b277772006-03-20 22:27:48 -08009885 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9886 goto out_not_found;
9887
9888 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9889 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9890 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9891 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9892 }
9893 } else {
9894 int vpd_cap;
9895
9896 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9897 for (i = 0; i < 256; i += 4) {
9898 u32 tmp, j = 0;
9899 u16 tmp16;
9900
9901 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9902 i);
9903 while (j++ < 100) {
9904 pci_read_config_word(tp->pdev, vpd_cap +
9905 PCI_VPD_ADDR, &tmp16);
9906 if (tmp16 & 0x8000)
9907 break;
9908 msleep(1);
9909 }
David S. Millerf49639e2006-06-09 11:58:36 -07009910 if (!(tmp16 & 0x8000))
9911 goto out_not_found;
9912
Michael Chan1b277772006-03-20 22:27:48 -08009913 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9914 &tmp);
9915 tmp = cpu_to_le32(tmp);
9916 memcpy(&vpd_data[i], &tmp, 4);
9917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009918 }
9919
9920 /* Now parse and find the part number. */
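	/* Walk the VPD resource list: skip the 0x82 identifier-string and
	 * 0x91 read-write resources, then scan the 0x90 read-only resource
	 * for the "PN" (part number) keyword.
	 */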
9921 for (i = 0; i < 256; ) {
9922 unsigned char val = vpd_data[i];
9923 int block_end;
9924
9925 if (val == 0x82 || val == 0x91) {
9926 i = (i + 3 +
9927 (vpd_data[i + 1] +
9928 (vpd_data[i + 2] << 8)));
9929 continue;
9930 }
9931
9932 if (val != 0x90)
9933 goto out_not_found;
9934
9935 block_end = (i + 3 +
9936 (vpd_data[i + 1] +
9937 (vpd_data[i + 2] << 8)));
9938 i += 3;
9939 while (i < block_end) {
9940 if (vpd_data[i + 0] == 'P' &&
9941 vpd_data[i + 1] == 'N') {
9942 int partno_len = vpd_data[i + 2];
9943
9944 if (partno_len > 24)
9945 goto out_not_found;
9946
9947 memcpy(tp->board_part_number,
9948 &vpd_data[i + 3],
9949 partno_len);
9950
9951 /* Success. */
9952 return;
9953 }
9954 }
9955
9956 /* Part number not found. */
9957 goto out_not_found;
9958 }
9959
9960out_not_found:
9961 strcpy(tp->board_part_number, "none");
9962}
9963
Michael Chanc4e65752006-03-20 22:29:32 -08009964static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9965{
9966 u32 val, offset, start;
9967
9968 if (tg3_nvram_read_swab(tp, 0, &val))
9969 return;
9970
9971 if (val != TG3_EEPROM_MAGIC)
9972 return;
9973
9974 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9975 tg3_nvram_read_swab(tp, 0x4, &start))
9976 return;
9977
9978 offset = tg3_nvram_logical_addr(tp, offset);
9979 if (tg3_nvram_read_swab(tp, offset, &val))
9980 return;
9981
9982 if ((val & 0xfc000000) == 0x0c000000) {
9983 u32 ver_offset, addr;
9984 int i;
9985
9986 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9987 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9988 return;
9989
9990 if (val != 0)
9991 return;
9992
9993 addr = offset + ver_offset - start;
9994 for (i = 0; i < 16; i += 4) {
9995 if (tg3_nvram_read(tp, addr + i, &val))
9996 return;
9997
9998 val = cpu_to_le32(val);
9999 memcpy(tp->fw_ver + i, &val, 4);
10000 }
10001 }
10002}
10003
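/* Probe-time discovery of chip revision, bus type, register access
 * methods, and the long list of per-chip workaround flags used throughout
 * the driver.
 */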
Linus Torvalds1da177e2005-04-16 15:20:36 -070010004static int __devinit tg3_get_invariants(struct tg3 *tp)
10005{
10006 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010007 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10008 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
Michael Chan399de502005-10-03 14:02:39 -070010009 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10010 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010011 { },
10012 };
10013 u32 misc_ctrl_reg;
10014 u32 cacheline_sz_reg;
10015 u32 pci_state_reg, grc_misc_cfg;
10016 u32 val;
10017 u16 pci_cmd;
10018 int err;
10019
Linus Torvalds1da177e2005-04-16 15:20:36 -070010020 /* Force memory write invalidate off. If we leave it on,
10021 * then on 5700_BX chips we have to enable a workaround.
10022 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10023	 * to match the cacheline size. The Broadcom driver has this
10024	 * workaround but turns MWI off all the time, so it never uses
10025 * it. This seems to suggest that the workaround is insufficient.
10026 */
10027 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10028 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10029 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10030
10031 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10032 * has the register indirect write enable bit set before
10033 * we try to access any of the MMIO registers. It is also
10034 * critical that the PCI-X hw workaround situation is decided
10035 * before that as well.
10036 */
10037 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10038 &misc_ctrl_reg);
10039
10040 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10041 MISC_HOST_CTRL_CHIPREV_SHIFT);
10042
Michael Chanff645be2005-04-21 17:09:53 -070010043 /* Wrong chip ID in 5752 A0. This code can be removed later
10044 * as A0 is not in production.
10045 */
10046 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10047 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10048
Michael Chan68929142005-08-09 20:17:14 -070010049 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10050 * we need to disable memory and use config. cycles
10051 * only to access all registers. The 5702/03 chips
10052 * can mistakenly decode the special cycles from the
10053 * ICH chipsets as memory write cycles, causing corruption
10054 * of register and memory space. Only certain ICH bridges
10055 * will drive special cycles with non-zero data during the
10056 * address phase which can fall within the 5703's address
10057 * range. This is not an ICH bug as the PCI spec allows
10058 * non-zero address during special cycles. However, only
10059 * these ICH bridges are known to drive non-zero addresses
10060 * during special cycles.
10061 *
10062 * Since special cycles do not cross PCI bridges, we only
10063 * enable this workaround if the 5703 is on the secondary
10064 * bus of these ICH bridges.
10065 */
10066 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10067 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10068 static struct tg3_dev_id {
10069 u32 vendor;
10070 u32 device;
10071 u32 rev;
10072 } ich_chipsets[] = {
10073 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10074 PCI_ANY_ID },
10075 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10076 PCI_ANY_ID },
10077 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10078 0xa },
10079 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10080 PCI_ANY_ID },
10081 { },
10082 };
10083 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10084 struct pci_dev *bridge = NULL;
10085
10086 while (pci_id->vendor != 0) {
10087 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10088 bridge);
10089 if (!bridge) {
10090 pci_id++;
10091 continue;
10092 }
10093 if (pci_id->rev != PCI_ANY_ID) {
10094 u8 rev;
10095
10096 pci_read_config_byte(bridge, PCI_REVISION_ID,
10097 &rev);
10098 if (rev > pci_id->rev)
10099 continue;
10100 }
10101 if (bridge->subordinate &&
10102 (bridge->subordinate->number ==
10103 tp->pdev->bus->number)) {
10104
10105 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10106 pci_dev_put(bridge);
10107 break;
10108 }
10109 }
10110 }
10111
Michael Chan4a29cc22006-03-19 13:21:12 -080010112 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10113 * DMA addresses > 40-bit. This bridge may have other additional
10114 * 57xx devices behind it in some 4-port NIC designs for example.
10115 * Any tg3 device found behind the bridge will also need the 40-bit
10116 * DMA workaround.
10117 */
Michael Chana4e2b342005-10-26 15:46:52 -070010118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10120 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080010121 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070010122 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070010123 }
Michael Chan4a29cc22006-03-19 13:21:12 -080010124 else {
10125 struct pci_dev *bridge = NULL;
10126
10127 do {
10128 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10129 PCI_DEVICE_ID_SERVERWORKS_EPB,
10130 bridge);
10131 if (bridge && bridge->subordinate &&
10132 (bridge->subordinate->number <=
10133 tp->pdev->bus->number) &&
10134 (bridge->subordinate->subordinate >=
10135 tp->pdev->bus->number)) {
10136 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10137 pci_dev_put(bridge);
10138 break;
10139 }
10140 } while (bridge);
10141 }
Michael Chan4cf78e42005-07-25 12:29:19 -070010142
Linus Torvalds1da177e2005-04-16 15:20:36 -070010143 /* Initialize misc host control in PCI block. */
10144 tp->misc_host_ctrl |= (misc_ctrl_reg &
10145 MISC_HOST_CTRL_CHIPREV);
10146 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10147 tp->misc_host_ctrl);
10148
10149 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10150 &cacheline_sz_reg);
10151
10152 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10153 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10154 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10155 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10156
John W. Linville2052da92005-04-21 16:56:08 -070010157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070010158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080010159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080010160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Michael Chana4e2b342005-10-26 15:46:52 -070010161 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070010162 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10163
John W. Linville1b440c562005-04-21 17:03:18 -070010164 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10165 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10166 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10167
Michael Chan5a6f3072006-03-20 22:28:05 -080010168 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
Michael Chan5a6f3072006-03-20 22:28:05 -080010171 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010172 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10173 } else
Michael Chan5a6f3072006-03-20 22:28:05 -080010174 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010176
Michael Chan0f893dc2005-07-25 12:30:38 -070010177 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10178 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
Michael Chand9ab5ad2006-03-20 22:27:35 -080010179 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080010180 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
Michael Chand9ab5ad2006-03-20 22:27:35 -080010181 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
Michael Chan0f893dc2005-07-25 12:30:38 -070010182 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10183
Linus Torvalds1da177e2005-04-16 15:20:36 -070010184 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10185 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10186
Michael Chan399de502005-10-03 14:02:39 -070010187 /* If we have an AMD 762 or VIA K8T800 chipset, write
10188 * reordering to the mailbox registers done by the host
10189 * controller can cause major troubles. We read back from
10190 * every mailbox register write to force the writes to be
10191 * posted to the chip in order.
10192 */
10193 if (pci_dev_present(write_reorder_chipsets) &&
10194 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10195 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10196
Linus Torvalds1da177e2005-04-16 15:20:36 -070010197 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10198 tp->pci_lat_timer < 64) {
10199 tp->pci_lat_timer = 64;
10200
10201 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10202 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10203 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10204 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10205
10206 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10207 cacheline_sz_reg);
10208 }
10209
10210 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10211 &pci_state_reg);
10212
10213 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10214 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10215
10216 /* If this is a 5700 BX chipset, and we are in PCI-X
10217 * mode, enable register write workaround.
10218 *
10219 * The workaround is to use indirect register accesses
10220 * for all chip writes not to mailbox registers.
10221 */
10222 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10223 u32 pm_reg;
10224 u16 pci_cmd;
10225
10226 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10227
10228			/* The chip can have its power management PCI config
10229 * space registers clobbered due to this bug.
10230 * So explicitly force the chip into D0 here.
10231 */
10232 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10233 &pm_reg);
10234 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10235 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10236 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10237 pm_reg);
10238
10239 /* Also, force SERR#/PERR# in PCI command. */
10240 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10241 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10242 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10243 }
10244 }
10245
Michael Chan087fe252005-08-09 20:17:41 -070010246 /* 5700 BX chips need to have their TX producer index mailboxes
10247	 * written twice to work around a bug.
10248 */
10249 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10250 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10251
Linus Torvalds1da177e2005-04-16 15:20:36 -070010252 /* Back to back register writes can cause problems on this chip,
10253 * the workaround is to read back all reg writes except those to
10254 * mailbox regs. See tg3_write_indirect_reg32().
10255 *
10256 * PCI Express 5750_A0 rev chips need this workaround too.
10257 */
10258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10259 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10260 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10261 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10262
10263 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10264 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10265 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10266 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10267
10268 /* Chip-specific fixup from Broadcom driver */
10269 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10270 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10271 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10272 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10273 }
10274
Michael Chan1ee582d2005-08-09 20:16:46 -070010275 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070010276 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070010277 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070010278 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070010279 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070010280 tp->write32_tx_mbox = tg3_write32;
10281 tp->write32_rx_mbox = tg3_write32;
10282
10283 /* Various workaround register access methods */
10284 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10285 tp->write32 = tg3_write_indirect_reg32;
10286 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10287 tp->write32 = tg3_write_flush_reg32;
10288
10289 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10290 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10291 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10292 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10293 tp->write32_rx_mbox = tg3_write_flush_reg32;
10294 }
Michael Chan20094932005-08-09 20:16:32 -070010295
Michael Chan68929142005-08-09 20:17:14 -070010296 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10297 tp->read32 = tg3_read_indirect_reg32;
10298 tp->write32 = tg3_write_indirect_reg32;
10299 tp->read32_mbox = tg3_read_indirect_mbox;
10300 tp->write32_mbox = tg3_write_indirect_mbox;
10301 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10302 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10303
10304 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070010305 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070010306
10307 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10308 pci_cmd &= ~PCI_COMMAND_MEMORY;
10309 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10310 }
10311
Michael Chanbbadf502006-04-06 21:46:34 -070010312 if (tp->write32 == tg3_write_indirect_reg32 ||
10313 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070010315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070010316 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10317
Michael Chan7d0c41e2005-04-21 17:06:20 -070010318 /* Get eeprom hw config before calling tg3_set_power_state().
10319 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10320 * determined before calling tg3_set_power_state() so that
10321 * we know whether or not to switch out of Vaux power.
10322 * When the flag is set, it means that GPIO1 is used for eeprom
10323 * write protect and also implies that it is a LOM where GPIOs
10324 * are not used to switch power.
10325 */
10326 tg3_get_eeprom_hw_cfg(tp);
10327
Michael Chan314fba32005-04-21 17:07:04 -070010328 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10329 * GPIO1 driven high will bring 5700's external PHY out of reset.
10330 * It is also used as eeprom write protect on LOMs.
10331 */
10332 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10333 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10334 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10335 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10336 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070010337 /* Unused GPIO3 must be driven as output on 5752 because there
10338 * are no pull-up resistors on unused GPIO pins.
10339 */
10340 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10341 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070010342
Michael Chanaf36e6b2006-03-23 01:28:06 -080010343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10344 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10345
Linus Torvalds1da177e2005-04-16 15:20:36 -070010346 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080010347 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010348 if (err) {
10349 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10350 pci_name(tp->pdev));
10351 return err;
10352 }
10353
10354 /* 5700 B0 chips do not support checksumming correctly due
10355 * to hardware bugs.
10356 */
10357 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10358 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10359
Linus Torvalds1da177e2005-04-16 15:20:36 -070010360 /* Derive initial jumbo mode from MTU assigned in
10361 * ether_setup() via the alloc_etherdev() call
10362 */
Michael Chan0f893dc2005-07-25 12:30:38 -070010363 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070010364 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070010365 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010366
10367 /* Determine WakeOnLan speed to use. */
10368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10369 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10370 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10371 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10372 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10373 } else {
10374 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10375 }
10376
10377 /* A few boards don't want Ethernet@WireSpeed phy feature */
10378 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10379 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10380 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070010381 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10382 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010383 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10384
10385 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10386 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10387 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10388 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10389 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10390
Michael Chanc424cb22006-04-29 18:56:34 -070010391 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10394 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10395 else
10396 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10397 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010398
Linus Torvalds1da177e2005-04-16 15:20:36 -070010399 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010400 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10401 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10402 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10403
10404 /* Initialize MAC MI mode, polling disabled. */
10405 tw32_f(MAC_MI_MODE, tp->mi_mode);
10406 udelay(80);
10407
10408 /* Initialize data/descriptor byte/word swapping. */
10409 val = tr32(GRC_MODE);
10410 val &= GRC_MODE_HOST_STACKUP;
10411 tw32(GRC_MODE, val | tp->grc_mode);
10412
10413 tg3_switch_clocks(tp);
10414
10415 /* Clear this out for sanity. */
10416 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10417
10418 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10419 &pci_state_reg);
10420 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10421 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10422 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10423
10424 if (chiprevid == CHIPREV_ID_5701_A0 ||
10425 chiprevid == CHIPREV_ID_5701_B0 ||
10426 chiprevid == CHIPREV_ID_5701_B2 ||
10427 chiprevid == CHIPREV_ID_5701_B5) {
10428 void __iomem *sram_base;
10429
10430 /* Write some dummy words into the SRAM status block
10431 * area, see if it reads back correctly. If the return
10432 * value is bad, force enable the PCIX workaround.
10433 */
10434 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10435
10436 writel(0x00000000, sram_base);
10437 writel(0x00000000, sram_base + 4);
10438 writel(0xffffffff, sram_base + 4);
10439 if (readl(sram_base) != 0x00000000)
10440 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10441 }
10442 }
10443
10444 udelay(50);
10445 tg3_nvram_init(tp);
10446
10447 grc_misc_cfg = tr32(GRC_MISC_CFG);
10448 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10449
10450 /* Broadcom's driver says that CIOBE multisplit has a bug */
10451#if 0
10452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10453 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10454 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10455 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10456 }
10457#endif
10458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10459 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10460 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10461 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10462
David S. Millerfac9b832005-05-18 22:46:34 -070010463 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10464 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10465 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10466 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10467 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10468 HOSTCC_MODE_CLRTICK_TXBD);
10469
10470 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10471 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10472 tp->misc_host_ctrl);
10473 }
10474
Linus Torvalds1da177e2005-04-16 15:20:36 -070010475 /* these are limited to 10/100 only */
10476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10477 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10479 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10480 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10481 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10482 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10483 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10484 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10485 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10486 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10487
10488 err = tg3_phy_probe(tp);
10489 if (err) {
10490 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10491 pci_name(tp->pdev), err);
10492 /* ... but do not return immediately ... */
10493 }
10494
10495 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080010496 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010497
10498 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10499 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10500 } else {
10501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10502 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10503 else
10504 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10505 }
10506
10507 /* 5700 {AX,BX} chips have a broken status block link
10508 * change bit implementation, so we must use the
10509 * status register in those cases.
10510 */
10511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10512 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10513 else
10514 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10515
10516	/* The led_ctrl is set during tg3_phy_probe; here we might
10517 * have to force the link status polling mechanism based
10518 * upon subsystem IDs.
10519 */
10520 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10521 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10522 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10523 TG3_FLAG_USE_LINKCHG_REG);
10524 }
10525
10526 /* For all SERDES we poll the MAC status register. */
10527 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10528 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10529 else
10530 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10531
Michael Chan5a6f3072006-03-20 22:28:05 -080010532	/* Chips other than the 5755 and 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070010533 * straddle the 4GB address boundary in some cases.
10534 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080010535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan5a6f3072006-03-20 22:28:05 -080010537 tp->dev->hard_start_xmit = tg3_start_xmit;
10538 else
10539 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010540
10541 tp->rx_offset = 2;
10542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10543 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10544 tp->rx_offset = 0;
10545
10546 /* By default, disable wake-on-lan. User can change this
10547 * using ETHTOOL_SWOL.
10548 */
10549 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10550
10551 return err;
10552}
10553
10554#ifdef CONFIG_SPARC64
10555static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10556{
10557 struct net_device *dev = tp->dev;
10558 struct pci_dev *pdev = tp->pdev;
10559 struct pcidev_cookie *pcp = pdev->sysdata;
10560
10561 if (pcp != NULL) {
10562 int node = pcp->prom_node;
10563
10564 if (prom_getproplen(node, "local-mac-address") == 6) {
10565 prom_getproperty(node, "local-mac-address",
10566 dev->dev_addr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070010567 memcpy(dev->perm_addr, dev->dev_addr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010568 return 0;
10569 }
10570 }
10571 return -ENODEV;
10572}
10573
10574static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10575{
10576 struct net_device *dev = tp->dev;
10577
10578 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070010579 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010580 return 0;
10581}
10582#endif
10583
10584static int __devinit tg3_get_device_address(struct tg3 *tp)
10585{
10586 struct net_device *dev = tp->dev;
10587 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080010588 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010589
10590#ifdef CONFIG_SPARC64
10591 if (!tg3_get_macaddr_sparc(tp))
10592 return 0;
10593#endif
10594
10595 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070010596 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010597 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010598 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10599 mac_offset = 0xcc;
10600 if (tg3_nvram_lock(tp))
10601 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10602 else
10603 tg3_nvram_unlock(tp);
10604 }
10605
10606 /* First try to get it from MAC address mailbox. */
10607 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
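	/* A value of 0x484b (ASCII "HK") in the upper 16 bits marks a
	 * firmware-supplied MAC address in the mailbox.
	 */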
10608 if ((hi >> 16) == 0x484b) {
10609 dev->dev_addr[0] = (hi >> 8) & 0xff;
10610 dev->dev_addr[1] = (hi >> 0) & 0xff;
10611
10612 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10613 dev->dev_addr[2] = (lo >> 24) & 0xff;
10614 dev->dev_addr[3] = (lo >> 16) & 0xff;
10615 dev->dev_addr[4] = (lo >> 8) & 0xff;
10616 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010617
Michael Chan008652b2006-03-27 23:14:53 -080010618 /* Some old bootcode may report a 0 MAC address in SRAM */
10619 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10620 }
10621 if (!addr_ok) {
10622 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070010623 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080010624 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10625 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10626 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10627 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10628 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10629 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10630 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10631 }
10632 /* Finally just fetch it out of the MAC control regs. */
10633 else {
10634 hi = tr32(MAC_ADDR_0_HIGH);
10635 lo = tr32(MAC_ADDR_0_LOW);
10636
10637 dev->dev_addr[5] = lo & 0xff;
10638 dev->dev_addr[4] = (lo >> 8) & 0xff;
10639 dev->dev_addr[3] = (lo >> 16) & 0xff;
10640 dev->dev_addr[2] = (lo >> 24) & 0xff;
10641 dev->dev_addr[1] = hi & 0xff;
10642 dev->dev_addr[0] = (hi >> 8) & 0xff;
10643 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010644 }
10645
10646 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10647#ifdef CONFIG_SPARC64
10648 if (!tg3_get_default_macaddr_sparc(tp))
10649 return 0;
10650#endif
10651 return -EINVAL;
10652 }
John W. Linville2ff43692005-09-12 14:44:20 -070010653 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010654 return 0;
10655}
10656
David S. Miller59e6b432005-05-18 22:50:10 -070010657#define BOUNDARY_SINGLE_CACHELINE 1
10658#define BOUNDARY_MULTI_CACHELINE 2
10659
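/* Choose DMA read/write boundary bits for DMA_RWCTRL based on the host
 * cache line size and whether the bus is plain PCI, PCI-X, or PCI
 * Express.
 */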
10660static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10661{
10662 int cacheline_size;
10663 u8 byte;
10664 int goal;
10665
10666 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10667 if (byte == 0)
10668 cacheline_size = 1024;
10669 else
10670 cacheline_size = (int) byte * 4;
10671
10672 /* On 5703 and later chips, the boundary bits have no
10673 * effect.
10674 */
10675 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10676 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10677 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10678 goto out;
10679
10680#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10681 goal = BOUNDARY_MULTI_CACHELINE;
10682#else
10683#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10684 goal = BOUNDARY_SINGLE_CACHELINE;
10685#else
10686 goal = 0;
10687#endif
10688#endif
10689
10690 if (!goal)
10691 goto out;
10692
10693 /* PCI controllers on most RISC systems tend to disconnect
10694 * when a device tries to burst across a cache-line boundary.
10695 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10696 *
10697 * Unfortunately, for PCI-E there are only limited
10698 * write-side controls for this, and thus for reads
10699 * we will still get the disconnects. We'll also waste
10700 * these PCI cycles for both read and write for chips
10701 * other than 5700 and 5701 which do not implement the
10702 * boundary bits.
10703 */
10704 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10705 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10706 switch (cacheline_size) {
10707 case 16:
10708 case 32:
10709 case 64:
10710 case 128:
10711 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10712 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10713 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10714 } else {
10715 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10716 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10717 }
10718 break;
10719
10720 case 256:
10721 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10722 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10723 break;
10724
10725 default:
10726 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10727 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10728 break;
10729		}
10730 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10731 switch (cacheline_size) {
10732 case 16:
10733 case 32:
10734 case 64:
10735 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10736 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10737 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10738 break;
10739 }
10740 /* fallthrough */
10741 case 128:
10742 default:
10743 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10744 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10745 break;
10746		}
10747 } else {
10748 switch (cacheline_size) {
10749 case 16:
10750 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10751 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10752 DMA_RWCTRL_WRITE_BNDRY_16);
10753 break;
10754 }
10755 /* fallthrough */
10756 case 32:
10757 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10758 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10759 DMA_RWCTRL_WRITE_BNDRY_32);
10760 break;
10761 }
10762 /* fallthrough */
10763 case 64:
10764 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10765 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10766 DMA_RWCTRL_WRITE_BNDRY_64);
10767 break;
10768 }
10769 /* fallthrough */
10770 case 128:
10771 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10772 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10773 DMA_RWCTRL_WRITE_BNDRY_128);
10774 break;
10775 }
10776 /* fallthrough */
10777 case 256:
10778 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10779 DMA_RWCTRL_WRITE_BNDRY_256);
10780 break;
10781 case 512:
10782 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10783 DMA_RWCTRL_WRITE_BNDRY_512);
10784 break;
10785 case 1024:
10786 default:
10787 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10788 DMA_RWCTRL_WRITE_BNDRY_1024);
10789 break;
10790 }
10791 }
10792
10793out:
10794 return val;
10795}
10796
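/* Push one descriptor through the chip's internal DMA engines as a probe.
 * The tg3_internal_buffer_desc is written into NIC SRAM via the PCI
 * memory-window config registers, the read (host-to-NIC) or write
 * (NIC-to-host) DMA engine is enabled, and the high-priority DMA FTQ is
 * kicked.  The matching completion FIFO is then polled for up to
 * 40 * 100us; returns 0 on completion, -ENODEV on timeout.
 */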
10797static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10798{
10799 struct tg3_internal_buffer_desc test_desc;
10800 u32 sram_dma_descs;
10801 int i, ret;
10802
10803 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10804
10805 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10806 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10807 tw32(RDMAC_STATUS, 0);
10808 tw32(WDMAC_STATUS, 0);
10809
10810 tw32(BUFMGR_MODE, 0);
10811 tw32(FTQ_RESET, 0);
10812
10813 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10814 test_desc.addr_lo = buf_dma & 0xffffffff;
10815 test_desc.nic_mbuf = 0x00002100;
10816 test_desc.len = size;
10817
10818 /*
10819 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10820 * the *second* time the tg3 driver was getting loaded after an
10821 * initial scan.
10822 *
10823 * Broadcom tells me:
10824 * ...the DMA engine is connected to the GRC block and a DMA
10825 * reset may affect the GRC block in some unpredictable way...
10826 * The behavior of resets to individual blocks has not been tested.
10827 *
10828 * Broadcom noted the GRC reset will also reset all sub-components.
10829 */
10830 if (to_device) {
10831 test_desc.cqid_sqid = (13 << 8) | 2;
10832
10833 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10834 udelay(40);
10835 } else {
10836 test_desc.cqid_sqid = (16 << 8) | 7;
10837
10838 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10839 udelay(40);
10840 }
10841 test_desc.flags = 0x00000005;
10842
10843 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10844 u32 val;
10845
10846 val = *(((u32 *)&test_desc) + i);
10847 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10848 sram_dma_descs + (i * sizeof(u32)));
10849 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10850 }
10851 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10852
10853 if (to_device) {
10854 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10855 } else {
10856 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10857 }
10858
10859 ret = -ENODEV;
10860 for (i = 0; i < 40; i++) {
10861 u32 val;
10862
10863 if (to_device)
10864 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10865 else
10866 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10867 if ((val & 0xffff) == sram_dma_descs) {
10868 ret = 0;
10869 break;
10870 }
10871
10872 udelay(100);
10873 }
10874
10875 return ret;
10876}
10877
10878#define TEST_BUFFER_SIZE 0x2000
10879
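/* Probe for host DMA quirks: compute an initial DMA_RW_CTRL value and, on
 * 5700/5701 only, bounce an 8 KB test pattern out to NIC SRAM and back,
 * falling back to a 16-byte write boundary if the pattern returns
 * corrupted.  A short PCI ID list of bridges known to need the workaround
 * gets the 16-byte boundary even when the test passes.
 */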
10880static int __devinit tg3_test_dma(struct tg3 *tp)
10881{
10882 dma_addr_t buf_dma;
10883 u32 *buf, saved_dma_rwctrl;
10884 int ret;
10885
10886 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10887 if (!buf) {
10888 ret = -ENOMEM;
10889 goto out_nofree;
10890 }
10891
10892 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10893 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10894
10895 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10896
10897 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10898 /* DMA read watermark not used on PCIE */
10899 tp->dma_rwctrl |= 0x00180000;
10900 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10903 tp->dma_rwctrl |= 0x003f0000;
10904 else
10905 tp->dma_rwctrl |= 0x003f000f;
10906 } else {
10907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10909 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10910
10911 /* If the 5704 is behind the EPB bridge, we can
10912 * do the less restrictive ONE_DMA workaround for
10913 * better performance.
10914 */
10915 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10917 tp->dma_rwctrl |= 0x8000;
10918 else if (ccval == 0x6 || ccval == 0x7)
10919 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10920
10921 /* Set bit 23 to enable PCIX hw bug fix */
10922 tp->dma_rwctrl |= 0x009f0000;
10923 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10924 /* 5780 always in PCIX mode */
10925 tp->dma_rwctrl |= 0x00144000;
10926 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10927 /* 5714 always in PCIX mode */
10928 tp->dma_rwctrl |= 0x00148000;
10929 } else {
10930 tp->dma_rwctrl |= 0x001b000f;
10931 }
10932 }
10933
10934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10936 tp->dma_rwctrl &= 0xfffffff0;
10937
10938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10940 /* Remove this if it causes problems for some boards. */
10941 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10942
10943 /* On 5700/5701 chips, we need to set this bit.
10944 * Otherwise the chip will issue cacheline transactions
10945 * to streamable DMA memory without all of the byte
10946 * enables turned on. This is an error on several
10947 * RISC PCI controllers, in particular sparc64.
10948 *
10949 * On 5703/5704 chips, this bit has been reassigned
10950 * a different meaning. In particular, it is used
10951 * on those chips to enable a PCI-X workaround.
10952 */
10953 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10954 }
10955
10956 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10957
10958#if 0
10959 /* Unneeded, already done by tg3_get_invariants. */
10960 tg3_switch_clocks(tp);
10961#endif
10962
10963 ret = 0;
10964 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10965 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10966 goto out;
10967
10968 /* It is best to perform DMA test with maximum write burst size
10969 * to expose the 5700/5701 write DMA bug.
10970 */
10971 saved_dma_rwctrl = tp->dma_rwctrl;
10972 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10973 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10974
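 /* In outline, the retry loop below does (sketch, not literal code):
 *
 *	fill buf[] with 0, 1, 2, ...;
 *	tg3_do_test_dma(..., 1);	// host buffer -> NIC SRAM
 *	tg3_do_test_dma(..., 0);	// NIC SRAM -> host buffer
 *	if (pattern intact) break;	// done
 *	else drop to a 16-byte write boundary and run the test again
 *	(corruption after that is treated as -ENODEV);
 */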
10975 while (1) {
10976 u32 *p = buf, i;
10977
10978 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10979 p[i] = i;
10980
10981 /* Send the buffer to the chip. */
10982 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10983 if (ret) {
10984 printk(KERN_ERR "tg3_test_dma() write to device failed, err = %d\n", ret);
10985 break;
10986 }
10987
10988#if 0
10989 /* validate data reached card RAM correctly. */
10990 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10991 u32 val;
10992 tg3_read_mem(tp, 0x2100 + (i*4), &val);
10993 if (le32_to_cpu(val) != p[i]) {
10994 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
10995 /* ret = -ENODEV here? */
10996 }
10997 p[i] = 0;
10998 }
10999#endif
11000 /* Now read it back. */
11001 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11002 if (ret) {
11003 printk(KERN_ERR "tg3_test_dma() read from device failed, err = %d\n", ret);
11004
11005 break;
11006 }
11007
11008 /* Verify it. */
11009 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11010 if (p[i] == i)
11011 continue;
11012
11013 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11014 DMA_RWCTRL_WRITE_BNDRY_16) {
11015 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11016 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11017 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11018 break;
11019 } else {
11020 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11021 ret = -ENODEV;
11022 goto out;
11023 }
11024 }
11025
11026 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11027 /* Success. */
11028 ret = 0;
11029 break;
11030 }
11031 }
11032 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11033 DMA_RWCTRL_WRITE_BNDRY_16) {
11034 static struct pci_device_id dma_wait_state_chipsets[] = {
11035 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11036 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11037 { },
11038 };
11039
11040 /* DMA test passed without adjusting DMA boundary,
11041 * now look for chipsets that are known to expose the
11042 * DMA bug without failing the test.
11043 */
11044 if (pci_dev_present(dma_wait_state_chipsets)) {
11045 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11046 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11047 }
11048 else
11049 /* Safe to use the calculated DMA boundary. */
11050 tp->dma_rwctrl = saved_dma_rwctrl;
11051
11052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11053 }
11054
11055out:
11056 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11057out_nofree:
11058 return ret;
11059}
11060
11061static void __devinit tg3_init_link_config(struct tg3 *tp)
11062{
11063 tp->link_config.advertising =
11064 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11065 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11066 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11067 ADVERTISED_Autoneg | ADVERTISED_MII);
11068 tp->link_config.speed = SPEED_INVALID;
11069 tp->link_config.duplex = DUPLEX_INVALID;
11070 tp->link_config.autoneg = AUTONEG_ENABLE;
11071 tp->link_config.active_speed = SPEED_INVALID;
11072 tp->link_config.active_duplex = DUPLEX_INVALID;
11073 tp->link_config.phy_is_low_power = 0;
11074 tp->link_config.orig_speed = SPEED_INVALID;
11075 tp->link_config.orig_duplex = DUPLEX_INVALID;
11076 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11077}
11078
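/* Pick buffer-manager MBUF watermarks: 5705-class chips use the reduced
 * _5705/_JUMBO_5780 defaults, everything else keeps the original 570x
 * values.  The DMA low/high watermarks are common to both.
 */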
11079static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11080{
11081 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11082 tp->bufmgr_config.mbuf_read_dma_low_water =
11083 DEFAULT_MB_RDMA_LOW_WATER_5705;
11084 tp->bufmgr_config.mbuf_mac_rx_low_water =
11085 DEFAULT_MB_MACRX_LOW_WATER_5705;
11086 tp->bufmgr_config.mbuf_high_water =
11087 DEFAULT_MB_HIGH_WATER_5705;
11088
11089 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11090 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11091 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11092 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11093 tp->bufmgr_config.mbuf_high_water_jumbo =
11094 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11095 } else {
11096 tp->bufmgr_config.mbuf_read_dma_low_water =
11097 DEFAULT_MB_RDMA_LOW_WATER;
11098 tp->bufmgr_config.mbuf_mac_rx_low_water =
11099 DEFAULT_MB_MACRX_LOW_WATER;
11100 tp->bufmgr_config.mbuf_high_water =
11101 DEFAULT_MB_HIGH_WATER;
11102
11103 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11104 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11105 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11106 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11107 tp->bufmgr_config.mbuf_high_water_jumbo =
11108 DEFAULT_MB_HIGH_WATER_JUMBO;
11109 }
11110
11111 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11112 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11113}
11114
11115static char * __devinit tg3_phy_string(struct tg3 *tp)
11116{
11117 switch (tp->phy_id & PHY_ID_MASK) {
11118 case PHY_ID_BCM5400: return "5400";
11119 case PHY_ID_BCM5401: return "5401";
11120 case PHY_ID_BCM5411: return "5411";
11121 case PHY_ID_BCM5701: return "5701";
11122 case PHY_ID_BCM5703: return "5703";
11123 case PHY_ID_BCM5704: return "5704";
11124 case PHY_ID_BCM5705: return "5705";
11125 case PHY_ID_BCM5750: return "5750";
11126 case PHY_ID_BCM5752: return "5752";
11127 case PHY_ID_BCM5714: return "5714";
11128 case PHY_ID_BCM5780: return "5780";
11129 case PHY_ID_BCM5755: return "5755";
11130 case PHY_ID_BCM5787: return "5787";
11131 case PHY_ID_BCM8002: return "8002/serdes";
11132 case 0: return "serdes";
11133 default: return "unknown";
11134 }
11135}
11136
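/* Format a human-readable bus description into the caller's buffer, e.g.
 * "PCI Express", "PCIX:133MHz:64-bit" or "PCI:33MHz:32-bit".
 */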
11137static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11138{
11139 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11140 strcpy(str, "PCI Express");
11141 return str;
11142 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11143 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11144
11145 strcpy(str, "PCIX:");
11146
11147 if ((clock_ctrl == 7) ||
11148 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11149 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11150 strcat(str, "133MHz");
11151 else if (clock_ctrl == 0)
11152 strcat(str, "33MHz");
11153 else if (clock_ctrl == 2)
11154 strcat(str, "50MHz");
11155 else if (clock_ctrl == 4)
11156 strcat(str, "66MHz");
11157 else if (clock_ctrl == 6)
11158 strcat(str, "100MHz");
11159 } else {
11160 strcpy(str, "PCI:");
11161 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11162 strcat(str, "66MHz");
11163 else
11164 strcat(str, "33MHz");
11165 }
11166 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11167 strcat(str, ":32-bit");
11168 else
11169 strcat(str, ":64-bit");
11170 return str;
11171}
11172
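/* On dual-port 5704/5714 boards the two NICs are functions of the same
 * PCI device, so walk the other functions in our slot to find the mate.
 * Falls back to tp->pdev itself when the board is configured single-port.
 */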
11173static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11174{
11175 struct pci_dev *peer;
11176 unsigned int func, devnr = tp->pdev->devfn & ~7;
11177
11178 for (func = 0; func < 8; func++) {
11179 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11180 if (peer && peer != tp->pdev)
11181 break;
11182 pci_dev_put(peer);
11183 }
11184 /* 5704 can be configured in single-port mode, set peer to
11185 * tp->pdev in that case.
11186 */
11187 if (!peer) {
11188 peer = tp->pdev;
11189 return peer;
11190 }
11191
11192 /*
11193 * We don't need to keep the refcount elevated; there's no way
11194 * to remove one half of this device without removing the other
11195 */
11196 pci_dev_put(peer);
11197
11198 return peer;
11199}
11200
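/* Seed the default interrupt-coalescing parameters reported through
 * ETHTOOL_GCOALESCE.  On 5705 and newer chips the per-IRQ tick values
 * and statistics-block coalescing are left at zero.
 */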
11201static void __devinit tg3_init_coal(struct tg3 *tp)
11202{
11203 struct ethtool_coalesce *ec = &tp->coal;
11204
11205 memset(ec, 0, sizeof(*ec));
11206 ec->cmd = ETHTOOL_GCOALESCE;
11207 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11208 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11209 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11210 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11211 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11212 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11213 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11214 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11215 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11216
11217 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11218 HOSTCC_MODE_CLRTICK_TXBD)) {
11219 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11220 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11221 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11222 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11223 }
11224
11225 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11226 ec->rx_coalesce_usecs_irq = 0;
11227 ec->tx_coalesce_usecs_irq = 0;
11228 ec->stats_block_coalesce_usecs = 0;
11229 }
11230}
11231
11232static int __devinit tg3_init_one(struct pci_dev *pdev,
11233 const struct pci_device_id *ent)
11234{
11235 static int tg3_version_printed = 0;
11236 unsigned long tg3reg_base, tg3reg_len;
11237 struct net_device *dev;
11238 struct tg3 *tp;
11239 int i, err, pm_cap;
11240 char str[40];
11241 u64 dma_mask, persist_dma_mask;
11242
11243 if (tg3_version_printed++ == 0)
11244 printk(KERN_INFO "%s", version);
11245
11246 err = pci_enable_device(pdev);
11247 if (err) {
11248 printk(KERN_ERR PFX "Cannot enable PCI device, "
11249 "aborting.\n");
11250 return err;
11251 }
11252
11253 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11254 printk(KERN_ERR PFX "Cannot find proper PCI device "
11255 "base address, aborting.\n");
11256 err = -ENODEV;
11257 goto err_out_disable_pdev;
11258 }
11259
11260 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11261 if (err) {
11262 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11263 "aborting.\n");
11264 goto err_out_disable_pdev;
11265 }
11266
11267 pci_set_master(pdev);
11268
11269 /* Find power-management capability. */
11270 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11271 if (pm_cap == 0) {
11272 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11273 "aborting.\n");
11274 err = -EIO;
11275 goto err_out_free_res;
11276 }
11277
11278 tg3reg_base = pci_resource_start(pdev, 0);
11279 tg3reg_len = pci_resource_len(pdev, 0);
11280
11281 dev = alloc_etherdev(sizeof(*tp));
11282 if (!dev) {
11283 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11284 err = -ENOMEM;
11285 goto err_out_free_res;
11286 }
11287
11288 SET_MODULE_OWNER(dev);
11289 SET_NETDEV_DEV(dev, &pdev->dev);
11290
11291 dev->features |= NETIF_F_LLTX;
11292#if TG3_VLAN_TAG_USED
11293 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11294 dev->vlan_rx_register = tg3_vlan_rx_register;
11295 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11296#endif
11297
11298 tp = netdev_priv(dev);
11299 tp->pdev = pdev;
11300 tp->dev = dev;
11301 tp->pm_cap = pm_cap;
11302 tp->mac_mode = TG3_DEF_MAC_MODE;
11303 tp->rx_mode = TG3_DEF_RX_MODE;
11304 tp->tx_mode = TG3_DEF_TX_MODE;
11305 tp->mi_mode = MAC_MI_MODE_BASE;
11306 if (tg3_debug > 0)
11307 tp->msg_enable = tg3_debug;
11308 else
11309 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11310
11311 /* The word/byte swap controls here control register access byte
11312 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11313 * setting below.
11314 */
11315 tp->misc_host_ctrl =
11316 MISC_HOST_CTRL_MASK_PCI_INT |
11317 MISC_HOST_CTRL_WORD_SWAP |
11318 MISC_HOST_CTRL_INDIR_ACCESS |
11319 MISC_HOST_CTRL_PCISTATE_RW;
11320
11321 /* The NONFRM (non-frame) byte/word swap controls take effect
11322 * on descriptor entries, anything which isn't packet data.
11323 *
11324 * The StrongARM chips on the board (one for tx, one for rx)
11325 * are running in big-endian mode.
11326 */
11327 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11328 GRC_MODE_WSWAP_NONFRM_DATA);
11329#ifdef __BIG_ENDIAN
11330 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11331#endif
11332 spin_lock_init(&tp->lock);
11333 spin_lock_init(&tp->tx_lock);
11334 spin_lock_init(&tp->indirect_lock);
11335 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11336
11337 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11338 if (tp->regs == 0UL) {
11339 printk(KERN_ERR PFX "Cannot map device registers, "
11340 "aborting.\n");
11341 err = -ENOMEM;
11342 goto err_out_free_dev;
11343 }
11344
11345 tg3_init_link_config(tp);
11346
11347 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11348 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11349 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11350
11351 dev->open = tg3_open;
11352 dev->stop = tg3_close;
11353 dev->get_stats = tg3_get_stats;
11354 dev->set_multicast_list = tg3_set_rx_mode;
11355 dev->set_mac_address = tg3_set_mac_addr;
11356 dev->do_ioctl = tg3_ioctl;
11357 dev->tx_timeout = tg3_tx_timeout;
11358 dev->poll = tg3_poll;
11359 dev->ethtool_ops = &tg3_ethtool_ops;
11360 dev->weight = 64;
11361 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11362 dev->change_mtu = tg3_change_mtu;
11363 dev->irq = pdev->irq;
11364#ifdef CONFIG_NET_POLL_CONTROLLER
11365 dev->poll_controller = tg3_poll_controller;
11366#endif
11367
11368 err = tg3_get_invariants(tp);
11369 if (err) {
11370 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11371 "aborting.\n");
11372 goto err_out_iounmap;
11373 }
11374
11375 /* The EPB bridge inside 5714, 5715, and 5780 and any
11376 * device behind the EPB cannot support DMA addresses > 40-bit.
11377 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11378 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11379 * do DMA address check in tg3_start_xmit().
11380 */
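 /* Example of the policy above: when TG3_FLAG_40BIT_DMA_BUG is set,
 * persist_dma_mask stays at DMA_40BIT_MASK for coherent allocations
 * while dma_mask may be raised to DMA_64BIT_MASK on HIGHMEM kernels;
 * out-of-range buffers are then handled in tg3_start_xmit().
 */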
11381 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11382 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11383 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11384 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11385#ifdef CONFIG_HIGHMEM
11386 dma_mask = DMA_64BIT_MASK;
11387#endif
11388 } else
11389 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11390
11391 /* Configure DMA attributes. */
11392 if (dma_mask > DMA_32BIT_MASK) {
11393 err = pci_set_dma_mask(pdev, dma_mask);
11394 if (!err) {
11395 dev->features |= NETIF_F_HIGHDMA;
11396 err = pci_set_consistent_dma_mask(pdev,
11397 persist_dma_mask);
11398 if (err < 0) {
11399 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11400 "DMA for consistent allocations\n");
11401 goto err_out_iounmap;
11402 }
11403 }
11404 }
11405 if (err || dma_mask == DMA_32BIT_MASK) {
11406 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11407 if (err) {
11408 printk(KERN_ERR PFX "No usable DMA configuration, "
11409 "aborting.\n");
11410 goto err_out_iounmap;
11411 }
11412 }
11413
11414 tg3_init_bufmgr_config(tp);
11415
11416#if TG3_TSO_SUPPORT != 0
11417 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11418 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11419 }
11420 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11422 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11423 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11424 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11425 } else {
11426 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11427 }
11428
11429 /* TSO is on by default on chips that support hardware TSO.
11430 * Firmware TSO on older chips gives lower performance, so it
11431 * is off by default, but can be enabled using ethtool.
11432 */
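 /* (For firmware-TSO chips this can later be toggled from userspace,
 * e.g. "ethtool -K ethX tso on", where ethX is the interface name.)
 */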
11433 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11434 dev->features |= NETIF_F_TSO;
11435
11436#endif
11437
11438 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11439 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11440 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11441 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11442 tp->rx_pending = 63;
11443 }
11444
11445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11446 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11447 tp->pdev_peer = tg3_find_peer(tp);
11448
11449 err = tg3_get_device_address(tp);
11450 if (err) {
11451 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11452 "aborting.\n");
11453 goto err_out_iounmap;
11454 }
11455
11456 /*
11457 * Reset the chip in case the UNDI or EFI driver did not shut it
11458 * down cleanly.  The DMA self test below will enable WDMAC and we
11459 * would then see (spurious) pending DMA on the PCI bus.
11460 */
11461 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11462 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11463 pci_save_state(tp->pdev);
11464 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11465 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11466 }
11467
11468 err = tg3_test_dma(tp);
11469 if (err) {
11470 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11471 goto err_out_iounmap;
11472 }
11473
11474 /* Tigon3 can do ipv4 only... and some chips have buggy
11475 * checksumming.
11476 */
11477 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11480 dev->features |= NETIF_F_HW_CSUM;
11481 else
11482 dev->features |= NETIF_F_IP_CSUM;
11483 dev->features |= NETIF_F_SG;
11484 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11485 } else
11486 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11487
11488 /* flow control autonegotiation is default behavior */
11489 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11490
11491 tg3_init_coal(tp);
11492
11493 /* Now that we have fully setup the chip, save away a snapshot
11494 * of the PCI config space. We need to restore this after
11495 * GRC_MISC_CFG core clock resets and some resume events.
11496 */
11497 pci_save_state(tp->pdev);
11498
11499 err = register_netdev(dev);
11500 if (err) {
11501 printk(KERN_ERR PFX "Cannot register net device, "
11502 "aborting.\n");
11503 goto err_out_iounmap;
11504 }
11505
11506 pci_set_drvdata(pdev, dev);
11507
11508 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11509 dev->name,
11510 tp->board_part_number,
11511 tp->pci_chip_rev_id,
11512 tg3_phy_string(tp),
11513 tg3_bus_string(tp, str),
11514 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11515
11516 for (i = 0; i < 6; i++)
11517 printk("%2.2x%c", dev->dev_addr[i],
11518 i == 5 ? '\n' : ':');
11519
11520 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11521 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11522 "TSOcap[%d] \n",
11523 dev->name,
11524 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11525 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11526 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11527 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11528 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11529 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11530 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11531 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11532 dev->name, tp->dma_rwctrl,
11533 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11534 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11535
11536 netif_carrier_off(tp->dev);
11537
11538 return 0;
11539
11540err_out_iounmap:
11541 if (tp->regs) {
11542 iounmap(tp->regs);
11543 tp->regs = NULL;
11544 }
11545
11546err_out_free_dev:
11547 free_netdev(dev);
11548
11549err_out_free_res:
11550 pci_release_regions(pdev);
11551
11552err_out_disable_pdev:
11553 pci_disable_device(pdev);
11554 pci_set_drvdata(pdev, NULL);
11555 return err;
11556}
11557
11558static void __devexit tg3_remove_one(struct pci_dev *pdev)
11559{
11560 struct net_device *dev = pci_get_drvdata(pdev);
11561
11562 if (dev) {
11563 struct tg3 *tp = netdev_priv(dev);
11564
11565 flush_scheduled_work();
11566 unregister_netdev(dev);
11567 if (tp->regs) {
11568 iounmap(tp->regs);
11569 tp->regs = NULL;
11570 }
11571 free_netdev(dev);
11572 pci_release_regions(pdev);
11573 pci_disable_device(pdev);
11574 pci_set_drvdata(pdev, NULL);
11575 }
11576}
11577
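/* PM suspend path: quiesce the interface (stop the timer, NAPI and the
 * queues), mask interrupts, halt the chip and clear INIT_COMPLETE, then
 * program the PCI power state chosen by the core.  If that last step
 * fails the device is re-initialized and brought back up so it stays
 * usable.
 */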
11578static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11579{
11580 struct net_device *dev = pci_get_drvdata(pdev);
11581 struct tg3 *tp = netdev_priv(dev);
11582 int err;
11583
11584 if (!netif_running(dev))
11585 return 0;
11586
11587 flush_scheduled_work();
11588 tg3_netif_stop(tp);
11589
11590 del_timer_sync(&tp->timer);
11591
11592 tg3_full_lock(tp, 1);
11593 tg3_disable_ints(tp);
11594 tg3_full_unlock(tp);
11595
11596 netif_device_detach(dev);
11597
11598 tg3_full_lock(tp, 0);
11599 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11600 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11601 tg3_full_unlock(tp);
11602
11603 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11604 if (err) {
11605 tg3_full_lock(tp, 0);
11606
11607 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11608 tg3_init_hw(tp, 1);
11609
11610 tp->timer.expires = jiffies + tp->timer_offset;
11611 add_timer(&tp->timer);
11612
11613 netif_device_attach(dev);
11614 tg3_netif_start(tp);
11615
11616 tg3_full_unlock(tp);
11617 }
11618
11619 return err;
11620}
11621
11622static int tg3_resume(struct pci_dev *pdev)
11623{
11624 struct net_device *dev = pci_get_drvdata(pdev);
11625 struct tg3 *tp = netdev_priv(dev);
11626 int err;
11627
11628 if (!netif_running(dev))
11629 return 0;
11630
11631 pci_restore_state(tp->pdev);
11632
11633 err = tg3_set_power_state(tp, PCI_D0);
11634 if (err)
11635 return err;
11636
11637 netif_device_attach(dev);
11638
11639 tg3_full_lock(tp, 0);
11640
11641 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11642 tg3_init_hw(tp, 1);
11643
11644 tp->timer.expires = jiffies + tp->timer_offset;
11645 add_timer(&tp->timer);
11646
11647 tg3_netif_start(tp);
11648
11649 tg3_full_unlock(tp);
11650 return 0;
11651 return 0;
11652}
11653
11654static struct pci_driver tg3_driver = {
11655 .name = DRV_MODULE_NAME,
11656 .id_table = tg3_pci_tbl,
11657 .probe = tg3_init_one,
11658 .remove = __devexit_p(tg3_remove_one),
11659 .suspend = tg3_suspend,
11660 .resume = tg3_resume
11661};
11662
11663static int __init tg3_init(void)
11664{
11665 return pci_module_init(&tg3_driver);
11666}
11667
11668static void __exit tg3_cleanup(void)
11669{
11670 pci_unregister_driver(&tg3_driver);
11671}
11672
11673module_init(tg3_init);
11674module_exit(tg3_cleanup);